From c0ea9ec68c02a309717ba085df2a21115a29bee7 Mon Sep 17 00:00:00 2001
From: Dan Albert
Date: Wed, 15 May 2024 13:06:19 -0700
Subject: [PATCH] Add absl.

absl doesn't provide an AAR, and the AGP DSL for creating one can't
describe the package correctly (it can't seem to find the libraries at
all, and even if it could, it can't describe a single header directory
shared by multiple libraries). Without that support, the only remaining
option is to vendor it into the tree and include it directly in any
CMake project that uses it. The downside of doing it this way is that
absl will be rebuilt for every module rather than shared among them.

An alternative would be importing android-base from AOSP, but that's a
rather large porting task. Another alternative would be doing none of
this and writing our own, much simpler subsets of those libraries.
Mostly we want `CHECK`, `LOG`, and `DISALLOW_COPY_AND_ASSIGN`. libbase's
logging.cpp has quite a few dependencies on other parts of libbase
(file, parseint, strings, threads), so it quickly becomes a large
project to import, but maybe those are only needed by APIs we don't
actually care about.
---
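Notes (below the fold, not part of the commit message): a minimal
sketch of what a module gets once the vendored copy is part of its
CMake build. The headers, macros, and `absl::InitializeLog()` are
absl's own (all appear in the file list below); the surrounding
functions and the module itself are hypothetical.

```cpp
#include "absl/log/check.h"
#include "absl/log/initialize.h"
#include "absl/log/log.h"

// Hypothetical module code: OpenDevice() is illustrative, not part of
// this patch. CHECK/LOG come from the vendored absl headers above.
int OpenDevice(int id) {
  CHECK_GE(id, 0) << "device ids are non-negative";  // fatal on failure
  LOG(INFO) << "opening device " << id;
  return id;
}

int main() {
  absl::InitializeLog();  // one-time logging setup before the first LOG
  return OpenDevice(3) == 3 ? 0 : 1;
}
```

Each module links the absl targets from its own CMakeLists (e.g.
`target_link_libraries(<module> absl::log absl::check)`), which is
where the per-module rebuild cost mentioned above comes from.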
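For scale, here is roughly what the "write our own, much simpler
subsets" alternative would look like. Everything in this sketch is
hypothetical; it exists only to show the approximate size of a
hand-rolled `CHECK`/`LOG`/`DISALLOW_COPY_AND_ASSIGN` without libbase's
file/parseint/strings/threads dependencies.

```cpp
// Hypothetical mini_logging.h, not code from this patch.
#include <cstdlib>
#include <iostream>
#include <sstream>

#define DISALLOW_COPY_AND_ASSIGN(T) \
  T(const T&) = delete;             \
  T& operator=(const T&) = delete

namespace mini {

// Accumulates one log line; flushes it (and aborts, if fatal) on
// destruction at the end of the full expression.
class LogMessage {
 public:
  LogMessage(const char* severity, const char* file, int line,
             bool fatal = false)
      : fatal_(fatal) {
    stream_ << severity << " " << file << ":" << line << "] ";
  }
  ~LogMessage() {
    std::cerr << stream_.str() << "\n";
    if (fatal_) std::abort();
  }
  std::ostream& stream() { return stream_; }

 private:
  DISALLOW_COPY_AND_ASSIGN(LogMessage);

  const bool fatal_;
  std::ostringstream stream_;
};

}  // namespace mini

// LOG(INFO) << "message"; the severity is stringized, not checked.
#define LOG(severity) \
  ::mini::LogMessage(#severity, __FILE__, __LINE__).stream()

// CHECK(cond) << "detail"; aborts when the condition is false.
#define CHECK(cond) \
  if (cond) {       \
  } else            \
    ::mini::LogMessage("FATAL", __FILE__, __LINE__, /*fatal=*/true) \
        .stream() << "Check failed: " #cond " "
```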
 ARCHITECTURE.md | 14 +
 CMake/README.md | 6 +
 CMake/absl.cmake | 2 +
 .../AndroidApplicationConventionPlugin.kt | 7 +
 third_party/.clang-format | 2 +
 third_party/README.md | 13 +
 third_party/absl/.clang-format | 4 +
 .../.github/ISSUE_TEMPLATE/00-bug_report.yml | 53 +
 .../absl/.github/ISSUE_TEMPLATE/config.yml | 5 +
 .../absl/.github/PULL_REQUEST_TEMPLATE.md | 8 +
 third_party/absl/.gitignore | 17 +
 third_party/absl/ABSEIL_ISSUE_TEMPLATE.md | 22 +
 third_party/absl/AUTHORS | 6 +
 third_party/absl/BUILD.bazel | 25 +
 third_party/absl/CMake/AbseilDll.cmake | 837 ++++
 third_party/absl/CMake/AbseilHelpers.cmake | 445 +++
 .../absl/CMake/Googletest/CMakeLists.txt.in | 14 +
 .../absl/CMake/Googletest/DownloadGTest.cmake | 41 +
 third_party/absl/CMake/README.md | 188 +
 third_party/absl/CMake/abslConfig.cmake.in | 8 +
 .../CMake/install_test_project/CMakeLists.txt | 25 +
 .../absl/CMake/install_test_project/simple.cc | 32 +
 .../absl/CMake/install_test_project/test.sh | 112 +
 third_party/absl/CMakeLists.txt | 270 ++
 third_party/absl/CONTRIBUTING.md | 141 +
 third_party/absl/FAQ.md | 167 +
 third_party/absl/LICENSE | 203 +
 third_party/absl/MODULE.bazel | 39 +
 third_party/absl/PrivacyInfo.xcprivacy | 14 +
 third_party/absl/README.md | 160 +
 third_party/absl/UPGRADES.md | 17 +
 third_party/absl/WORKSPACE | 59 +
 third_party/absl/absl/BUILD.bazel | 162 +
 third_party/absl/absl/CMakeLists.txt | 44 +
 third_party/absl/absl/abseil.podspec.gen.py | 243 ++
 third_party/absl/absl/algorithm/BUILD.bazel | 88 +
 .../absl/absl/algorithm/CMakeLists.txt | 71 +
 third_party/absl/absl/algorithm/algorithm.h | 64 +
 .../absl/absl/algorithm/algorithm_test.cc | 50 +
 third_party/absl/absl/algorithm/container.h | 1760 +++++++++
 .../absl/absl/algorithm/container_test.cc | 1147 ++++++
 third_party/absl/absl/base/BUILD.bazel | 880 +++++
 third_party/absl/absl/base/CMakeLists.txt | 739 ++++
 third_party/absl/absl/base/attributes.h | 874 +++++
 third_party/absl/absl/base/bit_cast_test.cc | 109 +
 third_party/absl/absl/base/call_once.h | 225 ++
 third_party/absl/absl/base/call_once_test.cc | 107 +
 third_party/absl/absl/base/casts.h | 180 +
 third_party/absl/absl/base/config.h | 1004 +++++
 third_party/absl/absl/base/config_test.cc | 60 +
 third_party/absl/absl/base/const_init.h | 76 +
 .../absl/absl/base/dynamic_annotations.h | 496 +++
 .../base/exception_safety_testing_test.cc | 962 +++++
 .../absl/absl/base/inline_variable_test.cc | 64 +
 .../absl/absl/base/inline_variable_test_a.cc | 27 +
 .../absl/absl/base/inline_variable_test_b.cc | 27 +
 .../absl/absl/base/internal/atomic_hook.h | 200 +
 .../absl/base/internal/atomic_hook_test.cc | 97 +
 .../base/internal/atomic_hook_test_helper.cc | 32 +
 .../base/internal/atomic_hook_test_helper.h | 34 +
 .../absl/base/internal/cmake_thread_test.cc | 22 +
 .../absl/absl/base/internal/cycleclock.cc | 77 +
 .../absl/absl/base/internal/cycleclock.h | 144 +
 .../absl/base/internal/cycleclock_config.h | 55 +
 .../absl/absl/base/internal/direct_mmap.h | 170 +
 .../absl/base/internal/dynamic_annotations.h | 398 ++
 third_party/absl/absl/base/internal/endian.h | 283 ++
 .../absl/absl/base/internal/endian_test.cc | 263 ++
 .../absl/absl/base/internal/errno_saver.h | 43 +
 .../absl/base/internal/errno_saver_test.cc | 45 +
 .../base/internal/exception_safety_testing.cc | 79 +
 .../base/internal/exception_safety_testing.h | 1109 ++++++
 .../absl/base/internal/exception_testing.h | 42 +
 .../absl/absl/base/internal/fast_type_id.h | 50 +
 .../absl/base/internal/fast_type_id_test.cc | 123 +
 .../absl/absl/base/internal/hide_ptr.h | 51 +
 .../absl/absl/base/internal/identity.h | 39 +
 .../absl/absl/base/internal/inline_variable.h | 108 +
 .../base/internal/inline_variable_testing.h | 46 +
 third_party/absl/absl/base/internal/invoke.h | 241 ++
 .../absl/base/internal/low_level_alloc.cc | 631 +++
 .../absl/absl/base/internal/low_level_alloc.h | 127 +
 .../base/internal/low_level_alloc_test.cc | 180 +
 .../absl/base/internal/low_level_scheduling.h | 134 +
 .../absl/base/internal/nullability_impl.h | 106 +
 .../absl/absl/base/internal/per_thread_tls.h | 52 +
 .../absl/absl/base/internal/pretty_function.h | 33 +
 .../absl/absl/base/internal/raw_logging.cc | 280 ++
 .../absl/absl/base/internal/raw_logging.h | 217 ++
 .../absl/absl/base/internal/scheduling_mode.h | 58 +
 .../absl/absl/base/internal/scoped_set_env.cc | 81 +
 .../absl/absl/base/internal/scoped_set_env.h | 45 +
 .../absl/base/internal/scoped_set_env_test.cc | 99 +
 .../absl/absl/base/internal/spinlock.cc | 232 ++
 .../absl/absl/base/internal/spinlock.h | 265 ++
 .../absl/base/internal/spinlock_akaros.inc | 35 +
 .../absl/base/internal/spinlock_benchmark.cc | 80 +
 .../absl/base/internal/spinlock_linux.inc | 71 +
 .../absl/base/internal/spinlock_posix.inc | 46 +
 .../absl/absl/base/internal/spinlock_wait.cc | 81 +
 .../absl/absl/base/internal/spinlock_wait.h | 95 +
 .../absl/base/internal/spinlock_win32.inc | 40 +
 .../absl/absl/base/internal/strerror.cc | 88 +
 .../absl/absl/base/internal/strerror.h | 39 +
 .../absl/base/internal/strerror_benchmark.cc | 29 +
 .../absl/absl/base/internal/strerror_test.cc | 88 +
 .../absl/absl/base/internal/sysinfo.cc | 489 +++
 third_party/absl/absl/base/internal/sysinfo.h | 74 +
 .../absl/absl/base/internal/sysinfo_test.cc | 88 +
 .../absl/base/internal/thread_identity.cc | 163 +
 .../absl/absl/base/internal/thread_identity.h | 269 ++
 .../internal/thread_identity_benchmark.cc | 38 +
 .../base/internal/thread_identity_test.cc | 129 +
 .../absl/absl/base/internal/throw_delegate.cc | 203 +
 .../absl/absl/base/internal/throw_delegate.h | 75 +
 .../absl/base/internal/tsan_mutex_interface.h | 68 +
 .../absl/base/internal/unaligned_access.h | 89 +
 .../base/internal/unique_small_name_test.cc | 77 +
 .../absl/base/internal/unscaledcycleclock.cc | 152 +
 .../absl/base/internal/unscaledcycleclock.h | 96 +
 .../base/internal/unscaledcycleclock_config.h | 62 +
 third_party/absl/absl/base/invoke_test.cc | 331 ++
 third_party/absl/absl/base/log_severity.cc | 56 +
 third_party/absl/absl/base/log_severity.h | 185 +
 .../absl/absl/base/log_severity_test.cc | 251 ++
 third_party/absl/absl/base/macros.h | 141 +
 third_party/absl/absl/base/no_destructor.h | 217 +
 .../absl/absl/base/no_destructor_benchmark.cc | 165 +
 .../absl/absl/base/no_destructor_test.cc | 209 +
 third_party/absl/absl/base/nullability.h | 224 ++
 .../absl/absl/base/nullability_test.cc | 129 +
 third_party/absl/absl/base/optimization.h | 305 ++
 .../absl/absl/base/optimization_test.cc | 129 +
 third_party/absl/absl/base/options.h | 258 ++
 third_party/absl/absl/base/policy_checks.h | 113 +
 third_party/absl/absl/base/port.h | 25 +
 third_party/absl/absl/base/prefetch.h | 209 +
 third_party/absl/absl/base/prefetch_test.cc | 64 +
 .../absl/absl/base/raw_logging_test.cc | 79 +
 .../absl/absl/base/spinlock_test_common.cc | 285 ++
 .../absl/absl/base/thread_annotations.h | 333 ++
 .../absl/absl/base/throw_delegate_test.cc | 175 +
 third_party/absl/absl/cleanup/BUILD.bazel | 73 +
 third_party/absl/absl/cleanup/CMakeLists.txt | 56 +
 third_party/absl/absl/cleanup/cleanup.h | 140 +
 third_party/absl/absl/cleanup/cleanup_test.cc | 311 ++
 .../absl/absl/cleanup/internal/cleanup.h | 100 +
 third_party/absl/absl/container/BUILD.bazel | 1085 ++++++
 .../absl/absl/container/CMakeLists.txt | 989 +++++
 .../absl/absl/container/btree_benchmark.cc | 764 ++++
 third_party/absl/absl/container/btree_map.h | 887 +++++
 third_party/absl/absl/container/btree_set.h | 821 ++++
 third_party/absl/absl/container/btree_test.cc | 3461 +++++++++++++++++
 third_party/absl/absl/container/btree_test.h | 166 +
 third_party/absl/absl/container/fixed_array.h | 556 +++
 .../absl/container/fixed_array_benchmark.cc | 67 +
 .../fixed_array_exception_safety_test.cc | 201 +
 .../absl/absl/container/fixed_array_test.cc | 853 ++++
 .../absl/absl/container/flat_hash_map.h | 617 +++
 .../absl/absl/container/flat_hash_map_test.cc | 357 ++
 .../absl/absl/container/flat_hash_set.h | 507 +++
 .../absl/absl/container/flat_hash_set_test.cc | 243 ++
 .../absl/absl/container/inlined_vector.h | 1002 +++++
 .../container/inlined_vector_benchmark.cc | 829 ++++
 .../inlined_vector_exception_safety_test.cc | 508 +++
 .../absl/container/inlined_vector_test.cc | 2126 ++++++++++
 .../absl/absl/container/internal/btree.h | 3053 +++++++++++++++
 .../absl/container/internal/btree_container.h | 763 ++++
 .../absl/absl/container/internal/common.h | 207 +
 .../container/internal/common_policy_traits.h | 134 +
 .../internal/common_policy_traits_test.cc | 134 +
 .../container/internal/compressed_tuple.h | 272 ++
 .../internal/compressed_tuple_test.cc | 419 ++
 .../container/internal/container_memory.h | 458 +++
 .../internal/container_memory_test.cc | 286 ++
 .../internal/hash_function_defaults.h | 209 +
 .../internal/hash_function_defaults_test.cc | 549 +++
 .../internal/hash_generator_testing.cc | 78 +
 .../internal/hash_generator_testing.h | 182 +
 .../container/internal/hash_policy_testing.h | 184 +
 .../internal/hash_policy_testing_test.cc | 45 +
 .../container/internal/hash_policy_traits.h | 157 +
 .../internal/hash_policy_traits_test.cc | 80 +
 .../absl/container/internal/hashtable_debug.h | 102 +
 .../internal/hashtable_debug_hooks.h | 85 +
 .../container/internal/hashtablez_sampler.cc | 285 ++
 .../container/internal/hashtablez_sampler.h | 257 ++
 ...ashtablez_sampler_force_weak_definition.cc | 31 +
 .../internal/hashtablez_sampler_test.cc | 424 ++
 .../absl/container/internal/inlined_vector.h | 1101 ++++++
 .../absl/absl/container/internal/layout.h | 728 ++++
 .../container/internal/layout_benchmark.cc | 122 +
 .../absl/container/internal/layout_test.cc | 1646 ++++++++
 .../container/internal/node_slot_policy.h | 95 +
 .../internal/node_slot_policy_test.cc | 71 +
 .../absl/container/internal/raw_hash_map.h | 224 ++
 .../absl/container/internal/raw_hash_set.cc | 380 ++
 .../absl/container/internal/raw_hash_set.h | 3325 ++++++++++++++++
 .../internal/raw_hash_set_allocator_test.cc | 514 +++
 .../internal/raw_hash_set_benchmark.cc | 576 +++
 .../internal/raw_hash_set_probe_benchmark.cc | 592 +++
 .../container/internal/raw_hash_set_test.cc | 2684 +++++++++++++
 .../absl/container/internal/test_allocator.h | 387 ++
 .../internal/test_instance_tracker.cc | 29 +
 .../internal/test_instance_tracker.h | 274 ++
 .../internal/test_instance_tracker_test.cc | 184 +
 .../absl/absl/container/internal/tracked.h | 83 +
 .../internal/unordered_map_constructor_test.h | 494 +++
 .../internal/unordered_map_lookup_test.h | 117 +
 .../internal/unordered_map_members_test.h | 87 +
 .../internal/unordered_map_modifiers_test.h | 352 ++
 .../container/internal/unordered_map_test.cc | 50 +
 .../internal/unordered_set_constructor_test.h | 496 +++
 .../internal/unordered_set_lookup_test.h | 91 +
 .../internal/unordered_set_members_test.h | 86 +
 .../internal/unordered_set_modifiers_test.h | 221 ++
 .../container/internal/unordered_set_test.cc | 41 +
 .../absl/absl/container/node_hash_map.h | 608 +++
 .../absl/absl/container/node_hash_map_test.cc | 286 ++
 .../absl/absl/container/node_hash_set.h | 504 +++
 .../absl/absl/container/node_hash_set_test.cc | 143 +
 .../container/sample_element_size_test.cc | 114 +
 .../absl/copts/AbseilConfigureCopts.cmake | 106 +
 .../absl/copts/GENERATED_AbseilCopts.cmake | 229 ++
 .../absl/absl/copts/GENERATED_copts.bzl | 230 ++
 .../absl/absl/copts/configure_copts.bzl | 82 +
 third_party/absl/absl/copts/copts.py | 191 +
 third_party/absl/absl/copts/generate_copts.py | 109 +
 third_party/absl/absl/crc/BUILD.bazel | 220 ++
 third_party/absl/absl/crc/CMakeLists.txt | 175 +
 third_party/absl/absl/crc/crc32c.cc | 99 +
 third_party/absl/absl/crc/crc32c.h | 190 +
 third_party/absl/absl/crc/crc32c_benchmark.cc | 183 +
 third_party/absl/absl/crc/crc32c_test.cc | 227 ++
 .../absl/absl/crc/internal/cpu_detect.cc | 286 ++
 .../absl/absl/crc/internal/cpu_detect.h | 63 +
 third_party/absl/absl/crc/internal/crc.cc | 437 +++
 third_party/absl/absl/crc/internal/crc.h | 83 +
 .../internal/crc32_x86_arm_combined_simd.h | 298 ++
 third_party/absl/absl/crc/internal/crc32c.h | 39 +
 .../absl/absl/crc/internal/crc32c_inline.h | 72 +
 .../absl/absl/crc/internal/crc_cord_state.cc | 130 +
 .../absl/absl/crc/internal/crc_cord_state.h | 159 +
 .../absl/crc/internal/crc_cord_state_test.cc | 124 +
 .../absl/absl/crc/internal/crc_internal.h | 177 +
 .../absl/absl/crc/internal/crc_memcpy.h | 122 +
 .../absl/crc/internal/crc_memcpy_fallback.cc | 77 +
 .../absl/absl/crc/internal/crc_memcpy_test.cc | 177 +
 .../internal/crc_memcpy_x86_arm_combined.cc | 450 +++
 .../crc/internal/crc_non_temporal_memcpy.cc | 93 +
 .../absl/crc/internal/crc_x86_arm_combined.cc | 733 ++++
 .../internal/non_temporal_arm_intrinsics.h | 79 +
 .../absl/crc/internal/non_temporal_memcpy.h | 180 +
 .../crc/internal/non_temporal_memcpy_test.cc | 88 +
 third_party/absl/absl/debugging/BUILD.bazel | 342 ++
 .../absl/absl/debugging/CMakeLists.txt | 298 ++
 .../absl/debugging/failure_signal_handler.cc | 405 ++
 .../absl/debugging/failure_signal_handler.h | 121 +
 .../debugging/failure_signal_handler_test.cc | 166 +
 .../debugging/internal/address_is_readable.cc | 98 +
 .../debugging/internal/address_is_readable.h | 32 +
 .../absl/absl/debugging/internal/demangle.cc | 2012 ++++++++++
 .../absl/absl/debugging/internal/demangle.h | 73 +
 .../absl/debugging/internal/demangle_test.cc | 255 ++
 .../absl/debugging/internal/elf_mem_image.cc | 386 ++
 .../absl/debugging/internal/elf_mem_image.h | 140 +
 .../absl/debugging/internal/examine_stack.cc | 320 ++
 .../absl/debugging/internal/examine_stack.h | 64 +
 .../debugging/internal/stack_consumption.cc | 206 +
 .../debugging/internal/stack_consumption.h | 50 +
 .../internal/stack_consumption_test.cc | 50 +
 .../internal/stacktrace_aarch64-inl.inc | 266 ++
 .../debugging/internal/stacktrace_arm-inl.inc | 139 +
 .../debugging/internal/stacktrace_config.h | 88 +
 .../internal/stacktrace_emscripten-inl.inc | 110 +
 .../internal/stacktrace_generic-inl.inc | 108 +
 .../internal/stacktrace_powerpc-inl.inc | 258 ++
 .../internal/stacktrace_riscv-inl.inc | 191 +
 .../internal/stacktrace_unimplemented-inl.inc | 24 +
 .../internal/stacktrace_win32-inl.inc | 94 +
 .../debugging/internal/stacktrace_x86-inl.inc | 394 ++
 .../absl/absl/debugging/internal/symbolize.h | 153 +
 .../absl/debugging/internal/vdso_support.cc | 205 +
 .../absl/debugging/internal/vdso_support.h | 158 +
 third_party/absl/absl/debugging/leak_check.cc | 73 +
 third_party/absl/absl/debugging/leak_check.h | 150 +
 .../absl/debugging/leak_check_fail_test.cc | 41 +
 .../absl/absl/debugging/leak_check_test.cc | 41 +
 third_party/absl/absl/debugging/stacktrace.cc | 142 +
 third_party/absl/absl/debugging/stacktrace.h | 231 ++
 .../absl/debugging/stacktrace_benchmark.cc | 55 +
 .../absl/absl/debugging/stacktrace_test.cc | 47 +
 third_party/absl/absl/debugging/symbolize.cc | 43 +
 third_party/absl/absl/debugging/symbolize.h | 99 +
 .../absl/absl/debugging/symbolize_darwin.inc | 102 +
 .../absl/absl/debugging/symbolize_elf.inc | 1725 ++++++++
 .../absl/debugging/symbolize_emscripten.inc | 75 +
 .../absl/absl/debugging/symbolize_test.cc | 638 +++
 .../debugging/symbolize_unimplemented.inc | 40 +
 .../absl/absl/debugging/symbolize_win32.inc | 82 +
 third_party/absl/absl/flags/BUILD.bazel | 601 +++
 third_party/absl/absl/flags/CMakeLists.txt | 471 +++
 .../absl/absl/flags/commandlineflag.cc | 34 +
 third_party/absl/absl/flags/commandlineflag.h | 200 +
 .../absl/absl/flags/commandlineflag_test.cc | 231 ++
 third_party/absl/absl/flags/config.h | 68 +
 third_party/absl/absl/flags/config_test.cc | 61 +
 third_party/absl/absl/flags/declare.h | 68 +
 third_party/absl/absl/flags/flag.h | 301 ++
 third_party/absl/absl/flags/flag_benchmark.cc | 251 ++
 .../absl/absl/flags/flag_benchmark.lds | 13 +
 third_party/absl/absl/flags/flag_test.cc | 1217 ++++++
 third_party/absl/absl/flags/flag_test_defs.cc | 24 +
 .../absl/flags/internal/commandlineflag.cc | 26 +
 .../absl/flags/internal/commandlineflag.h | 68 +
 third_party/absl/absl/flags/internal/flag.cc | 615 +++
 third_party/absl/absl/flags/internal/flag.h | 796 ++++
 third_party/absl/absl/flags/internal/parse.h | 70 +
 .../absl/absl/flags/internal/path_util.h | 62 +
 .../absl/flags/internal/path_util_test.cc | 46 +
 .../flags/internal/private_handle_accessor.cc | 65 +
 .../flags/internal/private_handle_accessor.h | 61 +
 .../absl/absl/flags/internal/program_name.cc | 60 +
 .../absl/absl/flags/internal/program_name.h | 50 +
 .../absl/flags/internal/program_name_test.cc | 61 +
 .../absl/absl/flags/internal/registry.h | 97 +
 .../absl/absl/flags/internal/sequence_lock.h | 187 +
 .../absl/flags/internal/sequence_lock_test.cc | 169 +
 third_party/absl/absl/flags/internal/usage.cc | 555 +++
 third_party/absl/absl/flags/internal/usage.h | 106 +
 .../absl/absl/flags/internal/usage_test.cc | 544 +++
 third_party/absl/absl/flags/marshalling.cc | 291 ++
 third_party/absl/absl/flags/marshalling.h | 361 ++
 .../absl/absl/flags/marshalling_test.cc | 1220 ++++++
 third_party/absl/absl/flags/parse.cc | 943 +++++
 third_party/absl/absl/flags/parse.h | 130 +
 third_party/absl/absl/flags/parse_test.cc | 1087 ++++++
 third_party/absl/absl/flags/reflection.cc | 355 ++
 third_party/absl/absl/flags/reflection.h | 90 +
 .../absl/absl/flags/reflection_test.cc | 265 ++
 third_party/absl/absl/flags/usage.cc | 66 +
 third_party/absl/absl/flags/usage.h | 43 +
 third_party/absl/absl/flags/usage_config.cc | 165 +
 third_party/absl/absl/flags/usage_config.h | 135 +
 .../absl/absl/flags/usage_config_test.cc | 205 +
 third_party/absl/absl/functional/BUILD.bazel | 168 +
 .../absl/absl/functional/CMakeLists.txt | 134 +
 .../absl/absl/functional/any_invocable.h | 324 ++
 .../absl/functional/any_invocable_test.cc | 1719 ++++++++
 third_party/absl/absl/functional/bind_front.h | 193 +
 .../absl/absl/functional/bind_front_test.cc | 231 ++
 .../absl/absl/functional/function_ref.h | 151 +
 .../absl/absl/functional/function_ref_test.cc | 293 ++
 .../functional/function_type_benchmark.cc | 176 +
 .../absl/functional/internal/any_invocable.h | 891 +++++
 .../absl/functional/internal/front_binder.h | 95 +
 .../absl/functional/internal/function_ref.h | 116 +
 third_party/absl/absl/functional/overload.h | 75 +
 .../absl/absl/functional/overload_test.cc | 130 +
 third_party/absl/absl/hash/BUILD.bazel | 217 ++
 third_party/absl/absl/hash/CMakeLists.txt | 186 +
 third_party/absl/absl/hash/hash.h | 424 ++
 third_party/absl/absl/hash/hash_benchmark.cc | 323 ++
 .../absl/absl/hash/hash_instantiated_test.cc | 224 ++
 third_party/absl/absl/hash/hash_test.cc | 1148 ++++++
 third_party/absl/absl/hash/hash_testing.h | 378 ++
 third_party/absl/absl/hash/internal/city.cc | 349 ++
 third_party/absl/absl/hash/internal/city.h | 78 +
 .../absl/absl/hash/internal/city_test.cc | 597 +++
 third_party/absl/absl/hash/internal/hash.cc | 69 +
 third_party/absl/absl/hash/internal/hash.h | 1375 +++++++
 .../absl/absl/hash/internal/hash_test.h | 87 +
 .../absl/absl/hash/internal/low_level_hash.cc | 118 +
 .../absl/absl/hash/internal/low_level_hash.h | 50 +
 .../absl/hash/internal/low_level_hash_test.cc | 532 +++
 .../absl/absl/hash/internal/print_hash_of.cc | 23 +
 .../absl/absl/hash/internal/spy_hash_state.h | 266 ++
 third_party/absl/absl/log/BUILD.bazel | 676 ++++
 third_party/absl/absl/log/CMakeLists.txt | 1144 ++++++
 third_party/absl/absl/log/absl_check.h | 117 +
 third_party/absl/absl/log/absl_check_test.cc | 58 +
 third_party/absl/absl/log/absl_log.h | 115 +
 .../absl/absl/log/absl_log_basic_test.cc | 21 +
 third_party/absl/absl/log/absl_vlog_is_on.h | 93 +
 third_party/absl/absl/log/check.h | 209 +
 third_party/absl/absl/log/check_test.cc | 58 +
 third_party/absl/absl/log/check_test_impl.inc | 528 +++
 third_party/absl/absl/log/die_if_null.cc | 32 +
 third_party/absl/absl/log/die_if_null.h | 76 +
 third_party/absl/absl/log/die_if_null_test.cc | 107 +
 third_party/absl/absl/log/flags.cc | 143 +
 third_party/absl/absl/log/flags.h | 43 +
 third_party/absl/absl/log/flags_test.cc | 188 +
 third_party/absl/absl/log/globals.cc | 178 +
 third_party/absl/absl/log/globals.h | 218 ++
 third_party/absl/absl/log/globals_test.cc | 147 +
 third_party/absl/absl/log/initialize.cc | 38 +
 third_party/absl/absl/log/initialize.h | 45 +
 .../absl/absl/log/internal/BUILD.bazel | 471 +++
 .../absl/absl/log/internal/append_truncated.h | 47 +
 .../absl/absl/log/internal/check_impl.h | 150 +
 .../absl/absl/log/internal/check_op.cc | 118 +
 third_party/absl/absl/log/internal/check_op.h | 420 ++
 .../absl/absl/log/internal/conditions.cc | 83 +
 .../absl/absl/log/internal/conditions.h | 239 ++
 third_party/absl/absl/log/internal/config.h | 45 +
 third_party/absl/absl/log/internal/flags.h | 59 +
 third_party/absl/absl/log/internal/fnmatch.cc | 73 +
 third_party/absl/absl/log/internal/fnmatch.h | 35 +
 .../absl/log/internal/fnmatch_benchmark.cc | 29 +
 .../absl/absl/log/internal/fnmatch_test.cc | 59 +
 third_party/absl/absl/log/internal/globals.cc | 145 +
 third_party/absl/absl/log/internal/globals.h | 101 +
 .../absl/absl/log/internal/log_format.cc | 205 +
 .../absl/absl/log/internal/log_format.h | 78 +
 third_party/absl/absl/log/internal/log_impl.h | 282 ++
 .../absl/absl/log/internal/log_message.cc | 633 +++
 .../absl/absl/log/internal/log_message.h | 375 ++
 .../absl/absl/log/internal/log_sink_set.cc | 296 ++
 .../absl/absl/log/internal/log_sink_set.h | 54 +
 .../absl/absl/log/internal/nullguard.cc | 35 +
 .../absl/absl/log/internal/nullguard.h | 88 +
 .../absl/absl/log/internal/nullstream.h | 136 +
 third_party/absl/absl/log/internal/proto.cc | 220 ++
 third_party/absl/absl/log/internal/proto.h | 288 ++
 .../absl/log/internal/stderr_log_sink_test.cc | 105 +
 third_party/absl/absl/log/internal/strip.h | 72 +
 .../absl/absl/log/internal/structured.h | 58 +
 .../absl/absl/log/internal/test_actions.cc | 75 +
 .../absl/absl/log/internal/test_actions.h | 90 +
 .../absl/absl/log/internal/test_helpers.cc | 82 +
 .../absl/absl/log/internal/test_helpers.h | 71 +
 .../absl/absl/log/internal/test_matchers.cc | 217 ++
 .../absl/absl/log/internal/test_matchers.h | 94 +
 .../absl/absl/log/internal/vlog_config.cc | 340 ++
 .../absl/absl/log/internal/vlog_config.h | 163 +
 .../log/internal/vlog_config_benchmark.cc | 187 +
 third_party/absl/absl/log/internal/voidify.h | 44 +
 third_party/absl/absl/log/log.h | 361 ++
 third_party/absl/absl/log/log_basic_test.cc | 21 +
 .../absl/absl/log/log_basic_test_impl.inc | 545 +++
 third_party/absl/absl/log/log_benchmark.cc | 97 +
 third_party/absl/absl/log/log_entry.cc | 41 +
 third_party/absl/absl/log/log_entry.h | 221 ++
 third_party/absl/absl/log/log_entry_test.cc | 468 +++
 third_party/absl/absl/log/log_format_test.cc | 1872 +++++++++
 .../absl/absl/log/log_macro_hygiene_test.cc | 187 +
 .../absl/log/log_modifier_methods_test.cc | 233 ++
 third_party/absl/absl/log/log_sink.cc | 23 +
 third_party/absl/absl/log/log_sink.h | 64 +
 third_party/absl/absl/log/log_sink_registry.h | 61 +
 third_party/absl/absl/log/log_sink_test.cc | 418 ++
 third_party/absl/absl/log/log_streamer.h | 181 +
 .../absl/absl/log/log_streamer_test.cc | 416 ++
 third_party/absl/absl/log/scoped_mock_log.cc | 86 +
 third_party/absl/absl/log/scoped_mock_log.h | 197 +
 .../absl/absl/log/scoped_mock_log_test.cc | 295 ++
 third_party/absl/absl/log/stripping_test.cc | 502 +++
 third_party/absl/absl/log/structured.h | 70 +
 third_party/absl/absl/log/structured_test.cc | 63 +
 third_party/absl/absl/log/vlog_is_on.h | 72 +
 third_party/absl/absl/log/vlog_is_on_test.cc | 176 +
 third_party/absl/absl/memory/BUILD.bazel | 60 +
 third_party/absl/absl/memory/CMakeLists.txt | 41 +
 third_party/absl/absl/memory/memory.h | 278 ++
 third_party/absl/absl/memory/memory_test.cc | 222 ++
 third_party/absl/absl/meta/BUILD.bazel | 59 +
 third_party/absl/absl/meta/CMakeLists.txt | 54 +
 third_party/absl/absl/meta/type_traits.h | 564 +++
 .../absl/absl/meta/type_traits_test.cc | 841 ++++
 third_party/absl/absl/numeric/BUILD.bazel | 139 +
 third_party/absl/absl/numeric/CMakeLists.txt | 100 +
 third_party/absl/absl/numeric/bits.h | 196 +
 .../absl/absl/numeric/bits_benchmark.cc | 73 +
 third_party/absl/absl/numeric/bits_test.cc | 641 +++
 third_party/absl/absl/numeric/int128.cc | 399 ++
 third_party/absl/absl/numeric/int128.h | 1162 ++++++
 .../absl/absl/numeric/int128_benchmark.cc | 282 ++
 .../absl/numeric/int128_have_intrinsic.inc | 293 ++
 .../absl/absl/numeric/int128_no_intrinsic.inc | 328 ++
 .../absl/absl/numeric/int128_stream_test.cc | 1400 +++++++
 third_party/absl/absl/numeric/int128_test.cc | 1285 ++++++
 third_party/absl/absl/numeric/internal/bits.h | 358 ++
 .../absl/numeric/internal/representation.h | 55 +
 third_party/absl/absl/profiling/BUILD.bazel | 140 +
 .../absl/absl/profiling/CMakeLists.txt | 93 +
 .../profiling/internal/exponential_biased.cc | 93 +
 .../profiling/internal/exponential_biased.h | 130 +
 .../internal/exponential_biased_test.cc | 203 +
 .../profiling/internal/periodic_sampler.cc | 53 +
 .../profiling/internal/periodic_sampler.h | 211 +
 .../internal/periodic_sampler_benchmark.cc | 79 +
 .../internal/periodic_sampler_test.cc | 177 +
 .../absl/profiling/internal/sample_recorder.h | 253 ++
 .../internal/sample_recorder_test.cc | 184 +
 third_party/absl/absl/random/BUILD.bazel | 541 +++
 third_party/absl/absl/random/CMakeLists.txt | 1219 ++++++
 third_party/absl/absl/random/benchmarks.cc | 383 ++
 .../absl/absl/random/bernoulli_distribution.h | 200 +
 .../random/bernoulli_distribution_test.cc | 217 ++
 .../absl/absl/random/beta_distribution.h | 427 ++
 .../absl/random/beta_distribution_test.cc | 615 +++
 third_party/absl/absl/random/bit_gen_ref.h | 185 +
 .../absl/absl/random/bit_gen_ref_test.cc | 102 +
 .../absl/absl/random/discrete_distribution.cc | 98 +
 .../absl/absl/random/discrete_distribution.h | 247 ++
 .../absl/random/discrete_distribution_test.cc | 251 ++
 third_party/absl/absl/random/distributions.h | 452 +++
 .../absl/absl/random/distributions_test.cc | 466 +++
 third_party/absl/absl/random/examples_test.cc | 99 +
 .../absl/random/exponential_distribution.h | 165 +
 .../random/exponential_distribution_test.cc | 426 ++
 .../absl/absl/random/gaussian_distribution.cc | 104 +
 .../absl/absl/random/gaussian_distribution.h | 275 ++
 .../absl/random/gaussian_distribution_test.cc | 561 +++
 .../absl/absl/random/generators_test.cc | 186 +
 .../absl/absl/random/internal/BUILD.bazel | 784 ++++
 .../absl/absl/random/internal/chi_square.cc | 233 ++
 .../absl/absl/random/internal/chi_square.h | 89 +
 .../absl/random/internal/chi_square_test.cc | 365 ++
 .../random/internal/distribution_caller.h | 95 +
 .../random/internal/distribution_test_util.cc | 418 ++
 .../random/internal/distribution_test_util.h | 113 +
 .../internal/distribution_test_util_test.cc | 193 +
 .../absl/random/internal/explicit_seed_seq.h | 92 +
 .../random/internal/explicit_seed_seq_test.cc | 237 ++
 .../absl/random/internal/fast_uniform_bits.h | 271 ++
 .../random/internal/fast_uniform_bits_test.cc | 336 ++
 .../absl/absl/random/internal/fastmath.h | 57 +
 .../absl/random/internal/fastmath_test.cc | 97 +
 .../gaussian_distribution_gentables.cc | 143 +
 .../absl/absl/random/internal/generate_real.h | 144 +
 .../random/internal/generate_real_test.cc | 496 +++
 .../random/internal/iostream_state_saver.h | 245 ++
 .../internal/iostream_state_saver_test.cc | 373 ++
 .../absl/absl/random/internal/mock_helpers.h | 135 +
 .../absl/random/internal/mock_overload_set.h | 100 +
 .../absl/random/internal/nanobenchmark.cc | 804 ++++
 .../absl/absl/random/internal/nanobenchmark.h | 172 +
 .../random/internal/nanobenchmark_test.cc | 79 +
 .../absl/random/internal/nonsecure_base.h | 161 +
 .../random/internal/nonsecure_base_test.cc | 227 ++
 .../absl/absl/random/internal/pcg_engine.h | 287 ++
 .../absl/random/internal/pcg_engine_test.cc | 638 +++
 .../absl/absl/random/internal/platform.h | 171 +
 .../absl/absl/random/internal/pool_urbg.cc | 253 ++
 .../absl/absl/random/internal/pool_urbg.h | 131 +
 .../absl/random/internal/pool_urbg_test.cc | 182 +
 .../absl/absl/random/internal/randen.cc | 91 +
 .../absl/absl/random/internal/randen.h | 96 +
 .../absl/random/internal/randen_benchmarks.cc | 176 +
 .../absl/random/internal/randen_detect.cc | 229 ++
 .../absl/absl/random/internal/randen_detect.h | 33 +
 .../absl/absl/random/internal/randen_engine.h | 264 ++
 .../random/internal/randen_engine_test.cc | 655 ++++
 .../absl/absl/random/internal/randen_hwaes.cc | 526 +++
 .../absl/absl/random/internal/randen_hwaes.h | 50 +
 .../absl/random/internal/randen_hwaes_test.cc | 99 +
 .../absl/random/internal/randen_round_keys.cc | 462 +++
 .../absl/absl/random/internal/randen_slow.cc | 471 +++
 .../absl/absl/random/internal/randen_slow.h | 40 +
 .../absl/random/internal/randen_slow_test.cc | 61 +
 .../absl/absl/random/internal/randen_test.cc | 75 +
 .../absl/absl/random/internal/randen_traits.h | 88 +
 .../absl/random/internal/salted_seed_seq.h | 165 +
 .../random/internal/salted_seed_seq_test.cc | 168 +
 .../absl/random/internal/seed_material.cc | 267 ++
 .../absl/absl/random/internal/seed_material.h | 104 +
 .../random/internal/seed_material_test.cc | 202 +
 .../absl/absl/random/internal/sequence_urbg.h | 60 +
 .../absl/absl/random/internal/traits.h | 149 +
 .../absl/absl/random/internal/traits_test.cc | 126 +
 .../absl/random/internal/uniform_helper.h | 244 ++
 .../random/internal/uniform_helper_test.cc | 279 ++
 .../absl/absl/random/internal/wide_multiply.h | 96 +
 .../random/internal/wide_multiply_test.cc | 119 +
 .../random/log_uniform_int_distribution.h | 256 ++
 .../log_uniform_int_distribution_test.cc | 277 ++
 .../absl/absl/random/mock_distributions.h | 266 ++
 .../absl/random/mock_distributions_test.cc | 72 +
 .../absl/absl/random/mocking_bit_gen.h | 240 ++
 .../absl/absl/random/mocking_bit_gen_test.cc | 394 ++
 .../absl/absl/random/poisson_distribution.h | 261 ++
 .../absl/random/poisson_distribution_test.cc | 570 +++
 third_party/absl/absl/random/random.h | 189 +
 .../absl/absl/random/seed_gen_exception.cc | 46 +
 .../absl/absl/random/seed_gen_exception.h | 55 +
 .../absl/absl/random/seed_sequences.cc | 29 +
 third_party/absl/absl/random/seed_sequences.h | 111 +
 .../absl/absl/random/seed_sequences_test.cc | 126 +
 .../absl/random/uniform_int_distribution.h | 275 ++
 .../random/uniform_int_distribution_test.cc | 259 ++
 .../absl/random/uniform_real_distribution.h | 202 +
 .../random/uniform_real_distribution_test.cc | 394 ++
 .../absl/absl/random/zipf_distribution.h | 272 ++
 .../absl/random/zipf_distribution_test.cc | 423 ++
 third_party/absl/absl/status/BUILD.bazel | 131 +
 third_party/absl/absl/status/CMakeLists.txt | 104 +
 .../absl/status/internal/status_internal.cc | 248 ++
 .../absl/status/internal/status_internal.h | 132 +
 .../absl/status/internal/statusor_internal.h | 435 +++
 third_party/absl/absl/status/status.cc | 425 ++
 third_party/absl/absl/status/status.h | 943 +++++
 .../absl/status/status_payload_printer.cc | 36 +
 .../absl/absl/status/status_payload_printer.h | 52 +
 third_party/absl/absl/status/status_test.cc | 579 +++
 third_party/absl/absl/status/statusor.cc | 106 +
 third_party/absl/absl/status/statusor.h | 830 ++++
 third_party/absl/absl/status/statusor_test.cc | 1921 +++++++++
 third_party/absl/absl/strings/BUILD.bazel | 1475 +++++++
 third_party/absl/absl/strings/CMakeLists.txt | 1193 ++++++
 third_party/absl/absl/strings/ascii.cc | 320 ++
 third_party/absl/absl/strings/ascii.h | 244 ++
 .../absl/absl/strings/ascii_benchmark.cc | 132 +
 third_party/absl/absl/strings/ascii_test.cc | 358 ++
 .../absl/absl/strings/atod_manual_test.cc | 193 +
 .../absl/absl/strings/char_formatting_test.cc | 169 +
 third_party/absl/absl/strings/charconv.cc | 1446 +++++++
 third_party/absl/absl/strings/charconv.h | 123 +
 .../absl/absl/strings/charconv_benchmark.cc | 204 +
 .../absl/absl/strings/charconv_test.cc | 787 ++++
 third_party/absl/absl/strings/charset.h | 164 +
 .../absl/absl/strings/charset_benchmark.cc | 57 +
 third_party/absl/absl/strings/charset_test.cc | 181 +
 third_party/absl/absl/strings/cord.cc | 1576 ++++++++
 third_party/absl/absl/strings/cord.h | 1706 ++++++++
 .../absl/absl/strings/cord_analysis.cc | 196 +
 third_party/absl/absl/strings/cord_analysis.h | 63 +
 third_party/absl/absl/strings/cord_buffer.cc | 30 +
 third_party/absl/absl/strings/cord_buffer.h | 572 +++
 .../absl/absl/strings/cord_buffer_test.cc | 322 ++
 third_party/absl/absl/strings/cord_test.cc | 3320 ++++++++++++++++
 .../absl/absl/strings/cord_test_helpers.h | 122 +
 third_party/absl/absl/strings/cordz_test.cc | 468 +++
 .../absl/absl/strings/cordz_test_helpers.h | 153 +
 third_party/absl/absl/strings/escaping.cc | 957 +++++
 third_party/absl/absl/strings/escaping.h | 169 +
 .../absl/absl/strings/escaping_benchmark.cc | 98 +
 .../absl/absl/strings/escaping_test.cc | 714 ++++
 .../absl/absl/strings/has_absl_stringify.h | 63 +
 .../absl/strings/has_absl_stringify_test.cc | 40 +
 .../absl/absl/strings/has_ostream_operator.h | 42 +
 .../absl/strings/has_ostream_operator_test.cc | 41 +
 .../absl/strings/internal/charconv_bigint.cc | 357 ++
 .../absl/strings/internal/charconv_bigint.h | 423 ++
 .../strings/internal/charconv_bigint_test.cc | 260 ++
 .../absl/strings/internal/charconv_parse.cc | 504 +++
 .../absl/strings/internal/charconv_parse.h | 99 +
 .../strings/internal/charconv_parse_test.cc | 357 ++
 .../absl/strings/internal/cord_data_edge.h | 63 +
 .../strings/internal/cord_data_edge_test.cc | 130 +
 .../absl/strings/internal/cord_internal.cc | 70 +
 .../absl/strings/internal/cord_internal.h | 891 +++++
 .../absl/strings/internal/cord_rep_btree.cc | 1241 ++++++
 .../absl/strings/internal/cord_rep_btree.h | 944 +++++
 .../internal/cord_rep_btree_navigator.cc | 187 +
 .../internal/cord_rep_btree_navigator.h | 267 ++
 .../internal/cord_rep_btree_navigator_test.cc | 346 ++
 .../strings/internal/cord_rep_btree_reader.cc | 69 +
 .../strings/internal/cord_rep_btree_reader.h | 212 +
 .../internal/cord_rep_btree_reader_test.cc | 293 ++
 .../strings/internal/cord_rep_btree_test.cc | 1568 ++++++++
 .../absl/strings/internal/cord_rep_consume.cc | 64 +
 .../absl/strings/internal/cord_rep_consume.h | 47 +
 .../absl/strings/internal/cord_rep_crc.cc | 56 +
 .../absl/absl/strings/internal/cord_rep_crc.h | 103 +
 .../strings/internal/cord_rep_crc_test.cc | 130 +
 .../absl/strings/internal/cord_rep_flat.h | 195 +
 .../strings/internal/cord_rep_test_util.h | 205 +
 .../absl/strings/internal/cordz_functions.cc | 96 +
 .../absl/strings/internal/cordz_functions.h | 77 +
 .../strings/internal/cordz_functions_test.cc | 149 +
 .../absl/strings/internal/cordz_handle.cc | 165 +
 .../absl/absl/strings/internal/cordz_handle.h | 98 +
 .../strings/internal/cordz_handle_test.cc | 265 ++
 .../absl/absl/strings/internal/cordz_info.cc | 417 ++
 .../absl/absl/strings/internal/cordz_info.h | 298 ++
 .../internal/cordz_info_statistics_test.cc | 510 +++
 .../absl/strings/internal/cordz_info_test.cc | 342 ++
 .../strings/internal/cordz_sample_token.cc | 64 +
 .../strings/internal/cordz_sample_token.h | 97 +
 .../internal/cordz_sample_token_test.cc | 208 +
 .../absl/strings/internal/cordz_statistics.h | 88 +
 .../strings/internal/cordz_update_scope.h | 71 +
 .../internal/cordz_update_scope_test.cc | 49 +
 .../strings/internal/cordz_update_tracker.h | 123 +
 .../internal/cordz_update_tracker_test.cc | 147 +
 .../internal/damerau_levenshtein_distance.cc | 93 +
 .../internal/damerau_levenshtein_distance.h | 34 +
 .../damerau_levenshtein_distance_test.cc | 99 +
 .../absl/absl/strings/internal/escaping.cc | 206 +
 .../absl/absl/strings/internal/escaping.h | 57 +
 .../strings/internal/escaping_test_common.h | 133 +
 .../strings/internal/has_absl_stringify.h | 44 +
 .../absl/absl/strings/internal/memutil.cc | 48 +
 .../absl/absl/strings/internal/memutil.h | 40 +
 .../strings/internal/memutil_benchmark.cc | 128 +
 .../absl/strings/internal/memutil_test.cc | 41 +
 .../strings/internal/numbers_test_common.h | 184 +
 .../absl/strings/internal/ostringstream.cc | 43 +
 .../absl/strings/internal/ostringstream.h | 114 +
 .../internal/ostringstream_benchmark.cc | 106 +
 .../strings/internal/ostringstream_test.cc | 131 +
 .../absl/strings/internal/pow10_helper.cc | 122 +
 .../absl/absl/strings/internal/pow10_helper.h | 40 +
 .../strings/internal/pow10_helper_test.cc | 122 +
 .../strings/internal/resize_uninitialized.h | 119 +
 .../internal/resize_uninitialized_test.cc | 133 +
 .../absl/strings/internal/stl_type_traits.h | 248 ++
 .../absl/strings/internal/str_format/arg.cc | 671 ++++
 .../absl/strings/internal/str_format/arg.h | 671 ++++
 .../strings/internal/str_format/arg_test.cc | 162 +
 .../absl/strings/internal/str_format/bind.cc | 275 ++
 .../absl/strings/internal/str_format/bind.h | 237 ++
 .../strings/internal/str_format/bind_test.cc | 157 +
 .../strings/internal/str_format/checker.h | 100 +
 .../internal/str_format/checker_test.cc | 176 +
 .../internal/str_format/constexpr_parser.h | 357 ++
 .../internal/str_format/convert_test.cc | 1486 +++++++
 .../strings/internal/str_format/extension.cc | 75 +
 .../strings/internal/str_format/extension.h | 456 +++
 .../internal/str_format/extension_test.cc | 109 +
 .../internal/str_format/float_conversion.cc | 1457 +++++++
 .../internal/str_format/float_conversion.h | 37 +
 .../strings/internal/str_format/output.cc | 72 +
 .../absl/strings/internal/str_format/output.h | 97 +
 .../internal/str_format/output_test.cc | 79 +
 .../strings/internal/str_format/parser.cc | 140 +
 .../absl/strings/internal/str_format/parser.h | 269 ++
 .../internal/str_format/parser_test.cc | 446 +++
 .../absl/strings/internal/str_join_internal.h | 317 ++
 .../strings/internal/str_split_internal.h | 481 +++
 .../absl/strings/internal/string_constant.h | 72 +
 .../strings/internal/string_constant_test.cc | 60 +
 .../absl/strings/internal/stringify_sink.cc | 28 +
 .../absl/strings/internal/stringify_sink.h | 57 +
 .../absl/absl/strings/internal/utf8.cc | 53 +
 third_party/absl/absl/strings/internal/utf8.h | 50 +
 .../absl/absl/strings/internal/utf8_test.cc | 66 +
 third_party/absl/absl/strings/match.cc | 133 +
 third_party/absl/absl/strings/match.h | 119 +
 third_party/absl/absl/strings/match_test.cc | 291 ++
 third_party/absl/absl/strings/numbers.cc | 1371 +++++++
 third_party/absl/absl/strings/numbers.h | 456 +++
 .../absl/absl/strings/numbers_benchmark.cc | 288 ++
 third_party/absl/absl/strings/numbers_test.cc | 1747 +++++++++
 third_party/absl/absl/strings/str_cat.cc | 336 ++
 third_party/absl/absl/strings/str_cat.h | 631 +++
 .../absl/absl/strings/str_cat_benchmark.cc | 265 ++
 third_party/absl/absl/strings/str_cat_test.cc | 730 ++++
 third_party/absl/absl/strings/str_format.h | 887 +++++
 .../absl/absl/strings/str_format_test.cc | 1224 ++++++
 third_party/absl/absl/strings/str_join.h | 287 ++
 .../absl/absl/strings/str_join_benchmark.cc | 97 +
 .../absl/absl/strings/str_join_test.cc | 609 +++
 third_party/absl/absl/strings/str_replace.cc | 91 +
 third_party/absl/absl/strings/str_replace.h | 222 ++
 .../absl/strings/str_replace_benchmark.cc | 122 +
 .../absl/absl/strings/str_replace_test.cc | 345 ++
 third_party/absl/absl/strings/str_split.cc | 144 +
 third_party/absl/absl/strings/str_split.h | 565 +++
 .../absl/absl/strings/str_split_benchmark.cc | 181 +
 .../absl/absl/strings/str_split_test.cc | 1023 +++++
 third_party/absl/absl/strings/string_view.cc | 262 ++
 third_party/absl/absl/strings/string_view.h | 769 ++++
 .../absl/strings/string_view_benchmark.cc | 382 ++
 .../absl/absl/strings/string_view_test.cc | 1381 +++++++
 third_party/absl/absl/strings/strip.h | 96 +
 third_party/absl/absl/strings/strip_test.cc | 198 +
 third_party/absl/absl/strings/substitute.cc | 182 +
 third_party/absl/absl/strings/substitute.h | 767 ++++
 .../absl/absl/strings/substitute_test.cc | 288 ++
 .../absl/absl/synchronization/BUILD.bazel | 397 ++
 .../absl/absl/synchronization/CMakeLists.txt | 284 ++
 .../absl/absl/synchronization/barrier.cc | 52 +
 .../absl/absl/synchronization/barrier.h | 79 +
 .../absl/absl/synchronization/barrier_test.cc | 75 +
 .../absl/synchronization/blocking_counter.cc | 67 +
 .../absl/synchronization/blocking_counter.h | 101 +
 .../blocking_counter_benchmark.cc | 84 +
 .../synchronization/blocking_counter_test.cc | 80 +
 .../internal/create_thread_identity.cc | 148 +
 .../internal/create_thread_identity.h | 56 +
 .../absl/synchronization/internal/futex.h | 177 +
 .../synchronization/internal/futex_waiter.cc | 111 +
 .../synchronization/internal/futex_waiter.h | 63 +
 .../synchronization/internal/graphcycles.cc | 708 ++++
 .../synchronization/internal/graphcycles.h | 141 +
 .../internal/graphcycles_benchmark.cc | 44 +
 .../internal/graphcycles_test.cc | 463 +++
 .../internal/kernel_timeout.cc | 225 ++
 .../synchronization/internal/kernel_timeout.h | 178 +
 .../internal/kernel_timeout_test.cc | 393 ++
 .../internal/per_thread_sem.cc | 106 +
 .../synchronization/internal/per_thread_sem.h | 119 +
 .../internal/per_thread_sem_test.cc | 190 +
 .../internal/pthread_waiter.cc | 167 +
 .../synchronization/internal/pthread_waiter.h | 60 +
 .../synchronization/internal/sem_waiter.cc | 122 +
 .../synchronization/internal/sem_waiter.h | 65 +
 .../synchronization/internal/stdcpp_waiter.cc | 91 +
 .../synchronization/internal/stdcpp_waiter.h | 56 +
 .../synchronization/internal/thread_pool.h | 96 +
 .../absl/synchronization/internal/waiter.h | 69 +
 .../synchronization/internal/waiter_base.cc | 42 +
 .../synchronization/internal/waiter_base.h | 90 +
 .../synchronization/internal/waiter_test.cc | 180 +
 .../synchronization/internal/win32_waiter.cc | 151 +
 .../synchronization/internal/win32_waiter.h | 72 +
 .../absl/synchronization/lifetime_test.cc | 181 +
 .../absl/absl/synchronization/mutex.cc | 2795 +++++++++++++
 third_party/absl/absl/synchronization/mutex.h | 1218 ++++++
 .../absl/synchronization/mutex_benchmark.cc | 339 ++
 .../mutex_method_pointer_test.cc | 138 +
 .../absl/absl/synchronization/mutex_test.cc | 2037 ++++++++++
 .../absl/absl/synchronization/notification.cc | 77 +
 .../absl/absl/synchronization/notification.h | 123 +
 .../absl/synchronization/notification_test.cc | 133 +
 third_party/absl/absl/time/BUILD.bazel | 158 +
 third_party/absl/absl/time/CMakeLists.txt | 141 +
 third_party/absl/absl/time/civil_time.cc | 199 +
 third_party/absl/absl/time/civil_time.h | 589 +++
 .../absl/absl/time/civil_time_benchmark.cc | 130 +
 third_party/absl/absl/time/civil_time_test.cc | 1262 ++++++
 third_party/absl/absl/time/clock.cc | 590 +++
 third_party/absl/absl/time/clock.h | 78 +
 third_party/absl/absl/time/clock_benchmark.cc | 74 +
 third_party/absl/absl/time/clock_test.cc | 122 +
 third_party/absl/absl/time/duration.cc | 953 +++++
 .../absl/absl/time/duration_benchmark.cc | 444 +++
 third_party/absl/absl/time/duration_test.cc | 1837 +++++++++
 third_party/absl/absl/time/flag_test.cc | 147 +
 third_party/absl/absl/time/format.cc | 161 +
 .../absl/absl/time/format_benchmark.cc | 64 +
 third_party/absl/absl/time/format_test.cc | 441 +++
 .../absl/absl/time/internal/cctz/BUILD.bazel | 167 +
 .../internal/cctz/include/cctz/civil_time.h | 332 ++
 .../cctz/include/cctz/civil_time_detail.h | 632 +++
 .../internal/cctz/include/cctz/time_zone.h | 460 +++
 .../cctz/include/cctz/zone_info_source.h | 102 +
 .../time/internal/cctz/src/cctz_benchmark.cc | 922 +++++
 .../internal/cctz/src/civil_time_detail.cc | 94 +
 .../time/internal/cctz/src/civil_time_test.cc | 1066 +++++
 .../time/internal/cctz/src/time_zone_fixed.cc | 140 +
 .../time/internal/cctz/src/time_zone_fixed.h | 52 +
 .../internal/cctz/src/time_zone_format.cc | 1029 +++++
 .../cctz/src/time_zone_format_test.cc | 1774 +++++++++
 .../time/internal/cctz/src/time_zone_if.cc | 47 +
 .../time/internal/cctz/src/time_zone_if.h | 80 +
 .../time/internal/cctz/src/time_zone_impl.cc | 115 +
 .../time/internal/cctz/src/time_zone_impl.h | 97 +
 .../time/internal/cctz/src/time_zone_info.cc | 1070 +++++
 .../time/internal/cctz/src/time_zone_info.h | 128 +
 .../time/internal/cctz/src/time_zone_libc.cc | 333 ++
 .../time/internal/cctz/src/time_zone_libc.h | 60 +
 .../internal/cctz/src/time_zone_lookup.cc | 363 ++
 .../cctz/src/time_zone_lookup_test.cc | 1416 +++++++
 .../time/internal/cctz/src/time_zone_posix.cc | 159 +
 .../time/internal/cctz/src/time_zone_posix.h | 132 +
 .../absl/absl/time/internal/cctz/src/tzfile.h | 118 +
 .../internal/cctz/src/zone_info_source.cc | 116 +
 .../internal/cctz/testdata/README.zoneinfo | 38 +
 .../absl/time/internal/cctz/testdata/version | 1 +
 .../cctz/testdata/zoneinfo/Africa/Abidjan | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/Africa/Accra | Bin 0 -> 700 bytes
 .../cctz/testdata/zoneinfo/Africa/Addis_Ababa | Bin 0 -> 151 bytes
 .../cctz/testdata/zoneinfo/Africa/Algiers | Bin 0 -> 470 bytes
 .../cctz/testdata/zoneinfo/Africa/Asmara | Bin 0 -> 170 bytes
 .../cctz/testdata/zoneinfo/Africa/Asmera | Bin 0 -> 170 bytes
 .../cctz/testdata/zoneinfo/Africa/Bamako | Bin 0 -> 158 bytes
 .../cctz/testdata/zoneinfo/Africa/Bangui | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Banjul | Bin 0 -> 168 bytes
 .../cctz/testdata/zoneinfo/Africa/Bissau | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Africa/Blantyre | Bin 0 -> 165 bytes
 .../cctz/testdata/zoneinfo/Africa/Brazzaville | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Bujumbura | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Cairo | Bin 0 -> 1309 bytes
 .../cctz/testdata/zoneinfo/Africa/Casablanca | Bin 0 -> 1919 bytes
 .../cctz/testdata/zoneinfo/Africa/Ceuta | Bin 0 -> 562 bytes
 .../cctz/testdata/zoneinfo/Africa/Conakry | Bin 0 -> 158 bytes
 .../cctz/testdata/zoneinfo/Africa/Dakar | Bin 0 -> 149 bytes
 .../testdata/zoneinfo/Africa/Dar_es_Salaam | Bin 0 -> 161 bytes
 .../cctz/testdata/zoneinfo/Africa/Djibouti | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Douala | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/El_Aaiun | Bin 0 -> 1830 bytes
 .../cctz/testdata/zoneinfo/Africa/Freetown | Bin 0 -> 324 bytes
 .../cctz/testdata/zoneinfo/Africa/Gaborone | Bin 0 -> 180 bytes
 .../cctz/testdata/zoneinfo/Africa/Harare | Bin 0 -> 131 bytes
 .../testdata/zoneinfo/Africa/Johannesburg | Bin 0 -> 190 bytes
 .../cctz/testdata/zoneinfo/Africa/Juba | Bin 0 -> 458 bytes
 .../cctz/testdata/zoneinfo/Africa/Kampala | Bin 0 -> 182 bytes
 .../cctz/testdata/zoneinfo/Africa/Khartoum | Bin 0 -> 458 bytes
 .../cctz/testdata/zoneinfo/Africa/Kigali | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Kinshasa | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Lagos | Bin 0 -> 180 bytes
 .../cctz/testdata/zoneinfo/Africa/Libreville | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Lome | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/Africa/Luanda | Bin 0 -> 146 bytes
 .../cctz/testdata/zoneinfo/Africa/Lubumbashi | Bin 0 -> 150 bytes
 .../cctz/testdata/zoneinfo/Africa/Lusaka | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Malabo | Bin 0 -> 150 bytes
 .../cctz/testdata/zoneinfo/Africa/Maputo | Bin 0 -> 131 bytes
 .../cctz/testdata/zoneinfo/Africa/Maseru | Bin 0 -> 157 bytes
 .../cctz/testdata/zoneinfo/Africa/Mbabane | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Africa/Mogadishu | Bin 0 -> 161 bytes
 .../cctz/testdata/zoneinfo/Africa/Monrovia | Bin 0 -> 164 bytes
 .../cctz/testdata/zoneinfo/Africa/Nairobi | Bin 0 -> 191 bytes
 .../cctz/testdata/zoneinfo/Africa/Ndjamena | Bin 0 -> 160 bytes
 .../cctz/testdata/zoneinfo/Africa/Niamey | Bin 0 -> 169 bytes
 .../cctz/testdata/zoneinfo/Africa/Nouakchott | Bin 0 -> 158 bytes
 .../cctz/testdata/zoneinfo/Africa/Ouagadougou | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/Africa/Porto-Novo | Bin 0 -> 150 bytes
 .../cctz/testdata/zoneinfo/Africa/Sao_Tome | Bin 0 -> 173 bytes
 .../cctz/testdata/zoneinfo/Africa/Timbuktu | Bin 0 -> 158 bytes
 .../cctz/testdata/zoneinfo/Africa/Tripoli | Bin 0 -> 431 bytes
 .../cctz/testdata/zoneinfo/Africa/Tunis | Bin 0 -> 449 bytes
 .../cctz/testdata/zoneinfo/Africa/Windhoek | Bin 0 -> 638 bytes
 .../cctz/testdata/zoneinfo/America/Adak | Bin 0 -> 969 bytes
 .../cctz/testdata/zoneinfo/America/Anchorage | Bin 0 -> 977 bytes
 .../cctz/testdata/zoneinfo/America/Anguilla | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Antigua | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/America/Araguaina | Bin 0 -> 592 bytes
 .../zoneinfo/America/Argentina/Buenos_Aires | Bin 0 -> 708 bytes
 .../zoneinfo/America/Argentina/Catamarca | Bin 0 -> 708 bytes
 .../zoneinfo/America/Argentina/ComodRivadavia | Bin 0 -> 708 bytes
 .../zoneinfo/America/Argentina/Cordoba | Bin 0 -> 708 bytes
 .../testdata/zoneinfo/America/Argentina/Jujuy | Bin 0 -> 690 bytes
 .../zoneinfo/America/Argentina/La_Rioja | Bin 0 -> 717 bytes
 .../zoneinfo/America/Argentina/Mendoza | Bin 0 -> 708 bytes
 .../zoneinfo/America/Argentina/Rio_Gallegos | Bin 0 -> 708 bytes
 .../testdata/zoneinfo/America/Argentina/Salta | Bin 0 -> 690 bytes
 .../zoneinfo/America/Argentina/San_Juan | Bin 0 -> 717 bytes
 .../zoneinfo/America/Argentina/San_Luis | Bin 0 -> 717 bytes
 .../zoneinfo/America/Argentina/Tucuman | Bin 0 -> 726 bytes
 .../zoneinfo/America/Argentina/Ushuaia | Bin 0 -> 708 bytes
 .../cctz/testdata/zoneinfo/America/Aruba | Bin 0 -> 151 bytes
 .../cctz/testdata/zoneinfo/America/Asuncion | Bin 0 -> 884 bytes
 .../cctz/testdata/zoneinfo/America/Atikokan | Bin 0 -> 224 bytes
 .../cctz/testdata/zoneinfo/America/Atka | Bin 0 -> 969 bytes
 .../cctz/testdata/zoneinfo/America/Bahia | Bin 0 -> 682 bytes
 .../testdata/zoneinfo/America/Bahia_Banderas | Bin 0 -> 728 bytes
 .../cctz/testdata/zoneinfo/America/Barbados | Bin 0 -> 278 bytes
 .../cctz/testdata/zoneinfo/America/Belem | Bin 0 -> 394 bytes
 .../cctz/testdata/zoneinfo/America/Belize | Bin 0 -> 1045 bytes
 .../testdata/zoneinfo/America/Blanc-Sablon | Bin 0 -> 205 bytes
 .../cctz/testdata/zoneinfo/America/Boa_Vista | Bin 0 -> 430 bytes
 .../cctz/testdata/zoneinfo/America/Bogota | Bin 0 -> 179 bytes
 .../cctz/testdata/zoneinfo/America/Boise | Bin 0 -> 999 bytes
 .../testdata/zoneinfo/America/Buenos_Aires | Bin 0 -> 708 bytes
 .../testdata/zoneinfo/America/Cambridge_Bay | Bin 0 -> 883 bytes
 .../testdata/zoneinfo/America/Campo_Grande | Bin 0 -> 952 bytes
 .../cctz/testdata/zoneinfo/America/Cancun | Bin 0 -> 529 bytes
 .../cctz/testdata/zoneinfo/America/Caracas | Bin 0 -> 190 bytes
 .../cctz/testdata/zoneinfo/America/Catamarca | Bin 0 -> 708 bytes
 .../cctz/testdata/zoneinfo/America/Cayenne | Bin 0 -> 151 bytes
 .../cctz/testdata/zoneinfo/America/Cayman | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/America/Chicago | Bin 0 -> 1754 bytes
 .../cctz/testdata/zoneinfo/America/Chihuahua | Bin 0 -> 691 bytes
 .../testdata/zoneinfo/America/Ciudad_Juarez | Bin 0 -> 718 bytes
 .../testdata/zoneinfo/America/Coral_Harbour | Bin 0 -> 224 bytes
 .../cctz/testdata/zoneinfo/America/Cordoba | Bin 0 -> 708 bytes
 .../cctz/testdata/zoneinfo/America/Costa_Rica | Bin 0 -> 232 bytes
 .../cctz/testdata/zoneinfo/America/Creston | Bin 0 -> 158 bytes
 .../cctz/testdata/zoneinfo/America/Cuiaba | Bin 0 -> 934 bytes
 .../cctz/testdata/zoneinfo/America/Curacao | Bin 0 -> 151 bytes
 .../testdata/zoneinfo/America/Danmarkshavn | Bin 0 -> 447 bytes
 .../cctz/testdata/zoneinfo/America/Dawson | Bin 0 -> 1029 bytes
 .../testdata/zoneinfo/America/Dawson_Creek | Bin 0 -> 683 bytes
 .../cctz/testdata/zoneinfo/America/Denver | Bin 0 -> 1042 bytes
 .../cctz/testdata/zoneinfo/America/Detroit | Bin 0 -> 899 bytes
 .../cctz/testdata/zoneinfo/America/Dominica | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Edmonton | Bin 0 -> 970 bytes
 .../cctz/testdata/zoneinfo/America/Eirunepe | Bin 0 -> 436 bytes
 .../testdata/zoneinfo/America/El_Salvador | Bin 0 -> 176 bytes
 .../cctz/testdata/zoneinfo/America/Ensenada | Bin 0 -> 1025 bytes
 .../testdata/zoneinfo/America/Fort_Nelson | Bin 0 -> 1448 bytes
 .../cctz/testdata/zoneinfo/America/Fort_Wayne | Bin 0 -> 531 bytes
 .../cctz/testdata/zoneinfo/America/Fortaleza | Bin 0 -> 484 bytes
 .../cctz/testdata/zoneinfo/America/Glace_Bay | Bin 0 -> 880 bytes
 .../cctz/testdata/zoneinfo/America/Godthab | Bin 0 -> 965 bytes
 .../cctz/testdata/zoneinfo/America/Goose_Bay | Bin 0 -> 1580 bytes
 .../cctz/testdata/zoneinfo/America/Grand_Turk | Bin 0 -> 853 bytes
 .../cctz/testdata/zoneinfo/America/Grenada | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Guadeloupe | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Guatemala | Bin 0 -> 212 bytes
 .../cctz/testdata/zoneinfo/America/Guayaquil | Bin 0 -> 179 bytes
 .../cctz/testdata/zoneinfo/America/Guyana | Bin 0 -> 181 bytes
 .../cctz/testdata/zoneinfo/America/Halifax | Bin 0 -> 1672 bytes
 .../cctz/testdata/zoneinfo/America/Havana | Bin 0 -> 1117 bytes
 .../cctz/testdata/zoneinfo/America/Hermosillo | Bin 0 -> 286 bytes
 .../zoneinfo/America/Indiana/Indianapolis | Bin 0 -> 531 bytes
 .../testdata/zoneinfo/America/Indiana/Knox | Bin 0 -> 1016 bytes
 .../testdata/zoneinfo/America/Indiana/Marengo | Bin 0 -> 567 bytes
 .../zoneinfo/America/Indiana/Petersburg | Bin 0 -> 683 bytes
 .../zoneinfo/America/Indiana/Tell_City | Bin 0 -> 522 bytes
 .../testdata/zoneinfo/America/Indiana/Vevay | Bin 0 -> 369 bytes
 .../zoneinfo/America/Indiana/Vincennes | Bin 0 -> 558 bytes
 .../testdata/zoneinfo/America/Indiana/Winamac | Bin 0 -> 603 bytes
 .../testdata/zoneinfo/America/Indianapolis | Bin 0 -> 531 bytes
 .../cctz/testdata/zoneinfo/America/Inuvik | Bin 0 -> 817 bytes
 .../cctz/testdata/zoneinfo/America/Iqaluit | Bin 0 -> 855 bytes
 .../cctz/testdata/zoneinfo/America/Jamaica | Bin 0 -> 339 bytes
 .../cctz/testdata/zoneinfo/America/Jujuy | Bin 0 -> 690 bytes
 .../cctz/testdata/zoneinfo/America/Juneau | Bin 0 -> 966 bytes
 .../zoneinfo/America/Kentucky/Louisville | Bin 0 -> 1242 bytes
 .../zoneinfo/America/Kentucky/Monticello | Bin 0 -> 972 bytes
 .../cctz/testdata/zoneinfo/America/Knox_IN | Bin 0 -> 1016 bytes
 .../cctz/testdata/zoneinfo/America/Kralendijk | Bin 0 -> 151 bytes
 .../cctz/testdata/zoneinfo/America/La_Paz | Bin 0 -> 170 bytes
 .../cctz/testdata/zoneinfo/America/Lima | Bin 0 -> 283 bytes
 .../testdata/zoneinfo/America/Los_Angeles | Bin 0 -> 1294 bytes
 .../cctz/testdata/zoneinfo/America/Louisville | Bin 0 -> 1242 bytes
 .../testdata/zoneinfo/America/Lower_Princes | Bin 0 -> 151 bytes
 .../cctz/testdata/zoneinfo/America/Maceio | Bin 0 -> 502 bytes
 .../cctz/testdata/zoneinfo/America/Managua | Bin 0 -> 295 bytes
 .../cctz/testdata/zoneinfo/America/Manaus | Bin 0 -> 412 bytes
 .../cctz/testdata/zoneinfo/America/Marigot | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Martinique | Bin 0 -> 178 bytes
 .../cctz/testdata/zoneinfo/America/Matamoros | Bin 0 -> 437 bytes
 .../cctz/testdata/zoneinfo/America/Mazatlan | Bin 0 -> 718 bytes
 .../cctz/testdata/zoneinfo/America/Mendoza | Bin 0 -> 708 bytes
 .../cctz/testdata/zoneinfo/America/Menominee | Bin 0 -> 917 bytes
 .../cctz/testdata/zoneinfo/America/Merida | Bin 0 -> 654 bytes
 .../cctz/testdata/zoneinfo/America/Metlakatla | Bin 0 -> 586 bytes
 .../testdata/zoneinfo/America/Mexico_City | Bin 0 -> 773 bytes
 .../cctz/testdata/zoneinfo/America/Miquelon | Bin 0 -> 550 bytes
 .../cctz/testdata/zoneinfo/America/Moncton | Bin 0 -> 1493 bytes
 .../cctz/testdata/zoneinfo/America/Monterrey | Bin 0 -> 644 bytes
 .../cctz/testdata/zoneinfo/America/Montevideo | Bin 0 -> 969 bytes
 .../cctz/testdata/zoneinfo/America/Montreal | Bin 0 -> 1717 bytes
 .../cctz/testdata/zoneinfo/America/Montserrat | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Nassau | Bin 0 -> 1006 bytes
 .../cctz/testdata/zoneinfo/America/New_York | Bin 0 -> 1744 bytes
 .../cctz/testdata/zoneinfo/America/Nipigon | Bin 0 -> 1717 bytes
 .../cctz/testdata/zoneinfo/America/Nome | Bin 0 -> 975 bytes
 .../cctz/testdata/zoneinfo/America/Noronha | Bin 0 -> 484 bytes
 .../zoneinfo/America/North_Dakota/Beulah | Bin 0 -> 1043 bytes
 .../zoneinfo/America/North_Dakota/Center | Bin 0 -> 990 bytes
 .../zoneinfo/America/North_Dakota/New_Salem | Bin 0 -> 990 bytes
 .../cctz/testdata/zoneinfo/America/Nuuk | Bin 0 -> 965 bytes
 .../cctz/testdata/zoneinfo/America/Ojinaga | Bin 0 -> 718 bytes
 .../cctz/testdata/zoneinfo/America/Panama | Bin 0 -> 149 bytes
 .../testdata/zoneinfo/America/Pangnirtung | Bin 0 -> 855 bytes
 .../cctz/testdata/zoneinfo/America/Paramaribo | Bin 0 -> 187 bytes
 .../cctz/testdata/zoneinfo/America/Phoenix | Bin 0 -> 240 bytes
 .../testdata/zoneinfo/America/Port-au-Prince | Bin 0 -> 565 bytes
 .../testdata/zoneinfo/America/Port_of_Spain | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Porto_Acre | Bin 0 -> 418 bytes
 .../testdata/zoneinfo/America/Porto_Velho | Bin 0 -> 394 bytes
 .../testdata/zoneinfo/America/Puerto_Rico | Bin 0 -> 177 bytes
 .../testdata/zoneinfo/America/Punta_Arenas | Bin 0 -> 1218 bytes
 .../testdata/zoneinfo/America/Rainy_River | Bin 0 -> 1294 bytes
 .../testdata/zoneinfo/America/Rankin_Inlet | Bin 0 -> 807 bytes
 .../cctz/testdata/zoneinfo/America/Recife | Bin 0 -> 484 bytes
 .../cctz/testdata/zoneinfo/America/Regina | Bin 0 -> 638 bytes
 .../cctz/testdata/zoneinfo/America/Resolute | Bin 0 -> 807 bytes
 .../cctz/testdata/zoneinfo/America/Rio_Branco | Bin 0 -> 418 bytes
 .../cctz/testdata/zoneinfo/America/Rosario | Bin 0 -> 708 bytes
 .../testdata/zoneinfo/America/Santa_Isabel | Bin 0 -> 1025 bytes
 .../cctz/testdata/zoneinfo/America/Santarem | Bin 0 -> 409 bytes
 .../cctz/testdata/zoneinfo/America/Santiago | Bin 0 -> 1354 bytes
 .../testdata/zoneinfo/America/Santo_Domingo | Bin 0 -> 317 bytes
 .../cctz/testdata/zoneinfo/America/Sao_Paulo | Bin 0 -> 952 bytes
 .../testdata/zoneinfo/America/Scoresbysund | Bin 0 -> 984 bytes
 .../cctz/testdata/zoneinfo/America/Shiprock | Bin 0 -> 1042 bytes
 .../cctz/testdata/zoneinfo/America/Sitka | Bin 0 -> 956 bytes
 .../testdata/zoneinfo/America/St_Barthelemy | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/St_Johns | Bin 0 -> 1878 bytes
 .../cctz/testdata/zoneinfo/America/St_Kitts | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/St_Lucia | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/America/St_Thomas | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/St_Vincent | Bin 0 -> 149 bytes
 .../testdata/zoneinfo/America/Swift_Current | Bin 0 -> 368 bytes
 .../testdata/zoneinfo/America/Tegucigalpa | Bin 0 -> 194 bytes
 .../cctz/testdata/zoneinfo/America/Thule | Bin 0 -> 455 bytes
 .../testdata/zoneinfo/America/Thunder_Bay | Bin 0 -> 1717 bytes
 .../cctz/testdata/zoneinfo/America/Tijuana | Bin 0 -> 1025 bytes
 .../cctz/testdata/zoneinfo/America/Toronto | Bin 0 -> 1717 bytes
 .../cctz/testdata/zoneinfo/America/Tortola | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Vancouver | Bin 0 -> 1330 bytes
 .../cctz/testdata/zoneinfo/America/Virgin | Bin 0 -> 130 bytes
 .../cctz/testdata/zoneinfo/America/Whitehorse | Bin 0 -> 1029 bytes
 .../cctz/testdata/zoneinfo/America/Winnipeg | Bin 0 -> 1294 bytes
 .../cctz/testdata/zoneinfo/America/Yakutat | Bin 0 -> 946 bytes
 .../testdata/zoneinfo/America/Yellowknife | Bin 0 -> 970 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Casey | Bin 0 -> 287 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Davis | Bin 0 -> 197 bytes
 .../zoneinfo/Antarctica/DumontDUrville | Bin 0 -> 152 bytes
 .../testdata/zoneinfo/Antarctica/Macquarie | Bin 0 -> 976 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Mawson | Bin 0 -> 152 bytes
 .../cctz/testdata/zoneinfo/Antarctica/McMurdo | Bin 0 -> 768 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Palmer | Bin 0 -> 887 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Rothera | Bin 0 -> 132 bytes
 .../testdata/zoneinfo/Antarctica/South_Pole | Bin 0 -> 768 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Syowa | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Troll | Bin 0 -> 158 bytes
 .../cctz/testdata/zoneinfo/Antarctica/Vostok | Bin 0 -> 170 bytes
 .../testdata/zoneinfo/Arctic/Longyearbyen | Bin 0 -> 676 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Aden | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Asia/Almaty | Bin 0 -> 609 bytes
 .../cctz/testdata/zoneinfo/Asia/Amman | Bin 0 -> 928 bytes
 .../cctz/testdata/zoneinfo/Asia/Anadyr | Bin 0 -> 743 bytes
 .../cctz/testdata/zoneinfo/Asia/Aqtau | Bin 0 -> 606 bytes
 .../cctz/testdata/zoneinfo/Asia/Aqtobe | Bin 0 -> 615 bytes
 .../cctz/testdata/zoneinfo/Asia/Ashgabat | Bin 0 -> 375 bytes
 .../cctz/testdata/zoneinfo/Asia/Ashkhabad | Bin 0 -> 375 bytes
 .../cctz/testdata/zoneinfo/Asia/Atyrau | Bin 0 -> 616 bytes
 .../cctz/testdata/zoneinfo/Asia/Baghdad | Bin 0 -> 630 bytes
 .../cctz/testdata/zoneinfo/Asia/Bahrain | Bin 0 -> 173 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Baku | Bin 0 -> 744 bytes
 .../cctz/testdata/zoneinfo/Asia/Bangkok | Bin 0 -> 152 bytes
 .../cctz/testdata/zoneinfo/Asia/Barnaul | Bin 0 -> 753 bytes
 .../cctz/testdata/zoneinfo/Asia/Beirut | Bin 0 -> 732 bytes
 .../cctz/testdata/zoneinfo/Asia/Bishkek | Bin 0 -> 618 bytes
 .../cctz/testdata/zoneinfo/Asia/Brunei | Bin 0 -> 154 bytes
 .../cctz/testdata/zoneinfo/Asia/Calcutta | Bin 0 -> 220 bytes
 .../cctz/testdata/zoneinfo/Asia/Chita | Bin 0 -> 750 bytes
 .../cctz/testdata/zoneinfo/Asia/Choibalsan | Bin 0 -> 619 bytes
 .../cctz/testdata/zoneinfo/Asia/Chongqing | Bin 0 -> 393 bytes
 .../cctz/testdata/zoneinfo/Asia/Chungking | Bin 0 -> 393 bytes
 .../cctz/testdata/zoneinfo/Asia/Colombo | Bin 0 -> 247 bytes
 .../cctz/testdata/zoneinfo/Asia/Dacca | Bin 0 -> 231 bytes
 .../cctz/testdata/zoneinfo/Asia/Damascus | Bin 0 -> 1234 bytes
 .../cctz/testdata/zoneinfo/Asia/Dhaka | Bin 0 -> 231 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Dili | Bin 0 -> 170 bytes
 .../cctz/testdata/zoneinfo/Asia/Dubai | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Asia/Dushanbe | Bin 0 -> 366 bytes
 .../cctz/testdata/zoneinfo/Asia/Famagusta | Bin 0 -> 940 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Gaza | Bin 0 -> 2968 bytes
 .../cctz/testdata/zoneinfo/Asia/Harbin | Bin 0 -> 393 bytes
 .../cctz/testdata/zoneinfo/Asia/Hebron | Bin 0 -> 2986 bytes
 .../cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh | Bin 0 -> 236 bytes
 .../cctz/testdata/zoneinfo/Asia/Hong_Kong | Bin 0 -> 775 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Hovd | Bin 0 -> 594 bytes
 .../cctz/testdata/zoneinfo/Asia/Irkutsk | Bin 0 -> 760 bytes
 .../cctz/testdata/zoneinfo/Asia/Istanbul | Bin 0 -> 1200 bytes
 .../cctz/testdata/zoneinfo/Asia/Jakarta | Bin 0 -> 248 bytes
 .../cctz/testdata/zoneinfo/Asia/Jayapura | Bin 0 -> 171 bytes
 .../cctz/testdata/zoneinfo/Asia/Jerusalem | Bin 0 -> 1074 bytes
 .../cctz/testdata/zoneinfo/Asia/Kabul | Bin 0 -> 159 bytes
 .../cctz/testdata/zoneinfo/Asia/Kamchatka | Bin 0 -> 727 bytes
 .../cctz/testdata/zoneinfo/Asia/Karachi | Bin 0 -> 266 bytes
 .../cctz/testdata/zoneinfo/Asia/Kashgar | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Asia/Kathmandu | Bin 0 -> 161 bytes
 .../cctz/testdata/zoneinfo/Asia/Katmandu | Bin 0 -> 161 bytes
 .../cctz/testdata/zoneinfo/Asia/Khandyga | Bin 0 -> 775 bytes
 .../cctz/testdata/zoneinfo/Asia/Kolkata | Bin 0 -> 220 bytes
 .../cctz/testdata/zoneinfo/Asia/Krasnoyarsk | Bin 0 -> 741 bytes
 .../cctz/testdata/zoneinfo/Asia/Kuala_Lumpur | Bin 0 -> 256 bytes
 .../cctz/testdata/zoneinfo/Asia/Kuching | Bin 0 -> 320 bytes
 .../cctz/testdata/zoneinfo/Asia/Kuwait | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Asia/Macao | Bin 0 -> 791 bytes
 .../cctz/testdata/zoneinfo/Asia/Macau | Bin 0 -> 791 bytes
 .../cctz/testdata/zoneinfo/Asia/Magadan | Bin 0 -> 751 bytes
 .../cctz/testdata/zoneinfo/Asia/Makassar | Bin 0 -> 190 bytes
 .../cctz/testdata/zoneinfo/Asia/Manila | Bin 0 -> 238 bytes
 .../cctz/testdata/zoneinfo/Asia/Muscat | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Asia/Nicosia | Bin 0 -> 597 bytes
 .../cctz/testdata/zoneinfo/Asia/Novokuznetsk | Bin 0 -> 726 bytes
 .../cctz/testdata/zoneinfo/Asia/Novosibirsk | Bin 0 -> 753 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Omsk | Bin 0 -> 741 bytes
 .../internal/cctz/testdata/zoneinfo/Asia/Oral | Bin 0 -> 625 bytes
 .../cctz/testdata/zoneinfo/Asia/Phnom_Penh | Bin 0 -> 200 bytes
 .../cctz/testdata/zoneinfo/Asia/Pontianak | Bin 0 -> 247 bytes
 .../cctz/testdata/zoneinfo/Asia/Pyongyang | Bin 0 -> 183 bytes
 .../cctz/testdata/zoneinfo/Asia/Qatar | Bin 0 -> 152 bytes
 .../cctz/testdata/zoneinfo/Asia/Qostanay | Bin 0 -> 615 bytes
 .../cctz/testdata/zoneinfo/Asia/Qyzylorda | Bin 0 -> 624 bytes
 .../cctz/testdata/zoneinfo/Asia/Rangoon | Bin 0 -> 187 bytes
 .../cctz/testdata/zoneinfo/Asia/Riyadh | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Asia/Saigon | Bin 0 -> 236 bytes
 .../cctz/testdata/zoneinfo/Asia/Sakhalin | Bin 0 -> 755 bytes
 .../cctz/testdata/zoneinfo/Asia/Samarkand | Bin 0 -> 366 bytes
 .../cctz/testdata/zoneinfo/Asia/Seoul | Bin 0 -> 415 bytes
 .../cctz/testdata/zoneinfo/Asia/Shanghai | Bin 0 -> 393 bytes
 .../cctz/testdata/zoneinfo/Asia/Singapore | Bin 0 -> 256 bytes
 .../cctz/testdata/zoneinfo/Asia/Srednekolymsk | Bin 0 -> 742 bytes
 .../cctz/testdata/zoneinfo/Asia/Taipei | Bin 0 -> 511 bytes
 .../cctz/testdata/zoneinfo/Asia/Tashkent | Bin 0 -> 366 bytes
 .../cctz/testdata/zoneinfo/Asia/Tbilisi | Bin 0 -> 629 bytes
 .../cctz/testdata/zoneinfo/Asia/Tehran | Bin 0 -> 812 bytes
 .../cctz/testdata/zoneinfo/Asia/Tel_Aviv | Bin 0 -> 1074 bytes
 .../cctz/testdata/zoneinfo/Asia/Thimbu | Bin 0 -> 154 bytes
 .../cctz/testdata/zoneinfo/Asia/Thimphu | Bin 0 -> 154 bytes
 .../cctz/testdata/zoneinfo/Asia/Tokyo | Bin 0 -> 213 bytes
 .../cctz/testdata/zoneinfo/Asia/Tomsk | Bin 0 -> 753 bytes
 .../cctz/testdata/zoneinfo/Asia/Ujung_Pandang | Bin 0 -> 190 bytes
 .../cctz/testdata/zoneinfo/Asia/Ulaanbaatar | Bin 0 -> 594 bytes
 .../cctz/testdata/zoneinfo/Asia/Ulan_Bator | Bin 0 -> 594 bytes
 .../cctz/testdata/zoneinfo/Asia/Urumqi | Bin 0 -> 133 bytes
 .../cctz/testdata/zoneinfo/Asia/Ust-Nera | Bin 0 -> 771 bytes
 .../cctz/testdata/zoneinfo/Asia/Vientiane | Bin 0 -> 218 bytes
 .../cctz/testdata/zoneinfo/Asia/Vladivostok | Bin 0 -> 742 bytes
 .../cctz/testdata/zoneinfo/Asia/Yakutsk | Bin 0 -> 741 bytes
 .../cctz/testdata/zoneinfo/Asia/Yangon | Bin 0 -> 187 bytes
 .../cctz/testdata/zoneinfo/Asia/Yekaterinburg | Bin 0 -> 760 bytes
 .../cctz/testdata/zoneinfo/Asia/Yerevan | Bin 0 -> 708 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Azores | Bin 0 -> 1453 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Bermuda | Bin 0 -> 1024 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Canary | Bin 0 -> 478 bytes
 .../testdata/zoneinfo/Atlantic/Cape_Verde | Bin 0 -> 175 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Faeroe | Bin 0 -> 441 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Faroe | Bin 0 -> 441 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Jan_Mayen | Bin 0 -> 676 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Madeira | Bin 0 -> 1453 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Reykjavik | Bin 0 -> 753 bytes
 .../testdata/zoneinfo/Atlantic/South_Georgia | Bin 0 -> 132 bytes
 .../cctz/testdata/zoneinfo/Atlantic/St_Helena | Bin 0 -> 149 bytes
 .../cctz/testdata/zoneinfo/Atlantic/Stanley | Bin 0 -> 789 bytes
 .../cctz/testdata/zoneinfo/Australia/ACT | Bin 0 -> 904 bytes
 .../cctz/testdata/zoneinfo/Australia/Adelaide | Bin 0 -> 921 bytes
 .../cctz/testdata/zoneinfo/Australia/Brisbane | Bin 0 -> 289 bytes
 .../testdata/zoneinfo/Australia/Broken_Hill | Bin 0 -> 941 bytes
.../cctz/testdata/zoneinfo/Australia/Canberra | Bin 0 -> 904 bytes .../cctz/testdata/zoneinfo/Australia/Currie | Bin 0 -> 1003 bytes .../cctz/testdata/zoneinfo/Australia/Darwin | Bin 0 -> 234 bytes .../cctz/testdata/zoneinfo/Australia/Eucla | Bin 0 -> 314 bytes .../cctz/testdata/zoneinfo/Australia/Hobart | Bin 0 -> 1003 bytes .../cctz/testdata/zoneinfo/Australia/LHI | Bin 0 -> 692 bytes .../cctz/testdata/zoneinfo/Australia/Lindeman | Bin 0 -> 325 bytes .../testdata/zoneinfo/Australia/Lord_Howe | Bin 0 -> 692 bytes .../testdata/zoneinfo/Australia/Melbourne | Bin 0 -> 904 bytes .../cctz/testdata/zoneinfo/Australia/NSW | Bin 0 -> 904 bytes .../cctz/testdata/zoneinfo/Australia/North | Bin 0 -> 234 bytes .../cctz/testdata/zoneinfo/Australia/Perth | Bin 0 -> 306 bytes .../testdata/zoneinfo/Australia/Queensland | Bin 0 -> 289 bytes .../cctz/testdata/zoneinfo/Australia/South | Bin 0 -> 921 bytes .../cctz/testdata/zoneinfo/Australia/Sydney | Bin 0 -> 904 bytes .../cctz/testdata/zoneinfo/Australia/Tasmania | Bin 0 -> 1003 bytes .../cctz/testdata/zoneinfo/Australia/Victoria | Bin 0 -> 904 bytes .../cctz/testdata/zoneinfo/Australia/West | Bin 0 -> 306 bytes .../testdata/zoneinfo/Australia/Yancowinna | Bin 0 -> 941 bytes .../cctz/testdata/zoneinfo/Brazil/Acre | Bin 0 -> 418 bytes .../cctz/testdata/zoneinfo/Brazil/DeNoronha | Bin 0 -> 484 bytes .../cctz/testdata/zoneinfo/Brazil/East | Bin 0 -> 952 bytes .../cctz/testdata/zoneinfo/Brazil/West | Bin 0 -> 412 bytes .../time/internal/cctz/testdata/zoneinfo/CET | Bin 0 -> 621 bytes .../internal/cctz/testdata/zoneinfo/CST6CDT | Bin 0 -> 951 bytes .../cctz/testdata/zoneinfo/Canada/Atlantic | Bin 0 -> 1672 bytes .../cctz/testdata/zoneinfo/Canada/Central | Bin 0 -> 1294 bytes .../cctz/testdata/zoneinfo/Canada/Eastern | Bin 0 -> 1717 bytes .../cctz/testdata/zoneinfo/Canada/Mountain | Bin 0 -> 970 bytes .../testdata/zoneinfo/Canada/Newfoundland | Bin 0 -> 1878 bytes .../cctz/testdata/zoneinfo/Canada/Pacific | Bin 0 -> 1330 bytes .../testdata/zoneinfo/Canada/Saskatchewan | Bin 0 -> 638 bytes .../cctz/testdata/zoneinfo/Canada/Yukon | Bin 0 -> 1029 bytes .../cctz/testdata/zoneinfo/Chile/Continental | Bin 0 -> 1354 bytes .../cctz/testdata/zoneinfo/Chile/EasterIsland | Bin 0 -> 1174 bytes .../time/internal/cctz/testdata/zoneinfo/Cuba | Bin 0 -> 1117 bytes .../time/internal/cctz/testdata/zoneinfo/EET | Bin 0 -> 497 bytes .../time/internal/cctz/testdata/zoneinfo/EST | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/EST5EDT | Bin 0 -> 951 bytes .../internal/cctz/testdata/zoneinfo/Egypt | Bin 0 -> 1309 bytes .../time/internal/cctz/testdata/zoneinfo/Eire | Bin 0 -> 1496 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+0 | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+1 | Bin 0 -> 113 bytes .../cctz/testdata/zoneinfo/Etc/GMT+10 | Bin 0 -> 114 bytes .../cctz/testdata/zoneinfo/Etc/GMT+11 | Bin 0 -> 114 bytes .../cctz/testdata/zoneinfo/Etc/GMT+12 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+2 | Bin 0 -> 113 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+3 | Bin 0 -> 113 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+4 | Bin 0 -> 113 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+5 | Bin 0 -> 113 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+6 | Bin 0 -> 113 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+7 | Bin 0 -> 113 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+8 | Bin 0 -> 113 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT+9 | Bin 0 -> 113 
bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-0 | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-1 | Bin 0 -> 114 bytes .../cctz/testdata/zoneinfo/Etc/GMT-10 | Bin 0 -> 115 bytes .../cctz/testdata/zoneinfo/Etc/GMT-11 | Bin 0 -> 115 bytes .../cctz/testdata/zoneinfo/Etc/GMT-12 | Bin 0 -> 115 bytes .../cctz/testdata/zoneinfo/Etc/GMT-13 | Bin 0 -> 115 bytes .../cctz/testdata/zoneinfo/Etc/GMT-14 | Bin 0 -> 115 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-2 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-3 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-4 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-5 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-6 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-7 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-8 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT-9 | Bin 0 -> 114 bytes .../internal/cctz/testdata/zoneinfo/Etc/GMT0 | Bin 0 -> 111 bytes .../cctz/testdata/zoneinfo/Etc/Greenwich | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Etc/UCT | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Etc/UTC | Bin 0 -> 111 bytes .../cctz/testdata/zoneinfo/Etc/Universal | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Etc/Zulu | Bin 0 -> 111 bytes .../cctz/testdata/zoneinfo/Europe/Amsterdam | Bin 0 -> 1071 bytes .../cctz/testdata/zoneinfo/Europe/Andorra | Bin 0 -> 389 bytes .../cctz/testdata/zoneinfo/Europe/Astrakhan | Bin 0 -> 726 bytes .../cctz/testdata/zoneinfo/Europe/Athens | Bin 0 -> 682 bytes .../cctz/testdata/zoneinfo/Europe/Belfast | Bin 0 -> 1599 bytes .../cctz/testdata/zoneinfo/Europe/Belgrade | Bin 0 -> 478 bytes .../cctz/testdata/zoneinfo/Europe/Berlin | Bin 0 -> 705 bytes .../cctz/testdata/zoneinfo/Europe/Bratislava | Bin 0 -> 723 bytes .../cctz/testdata/zoneinfo/Europe/Brussels | Bin 0 -> 1103 bytes .../cctz/testdata/zoneinfo/Europe/Bucharest | Bin 0 -> 661 bytes .../cctz/testdata/zoneinfo/Europe/Budapest | Bin 0 -> 766 bytes .../cctz/testdata/zoneinfo/Europe/Busingen | Bin 0 -> 497 bytes .../cctz/testdata/zoneinfo/Europe/Chisinau | Bin 0 -> 755 bytes .../cctz/testdata/zoneinfo/Europe/Copenhagen | Bin 0 -> 623 bytes .../cctz/testdata/zoneinfo/Europe/Dublin | Bin 0 -> 1496 bytes .../cctz/testdata/zoneinfo/Europe/Gibraltar | Bin 0 -> 1220 bytes .../cctz/testdata/zoneinfo/Europe/Guernsey | Bin 0 -> 1611 bytes .../cctz/testdata/zoneinfo/Europe/Helsinki | Bin 0 -> 481 bytes .../cctz/testdata/zoneinfo/Europe/Isle_of_Man | Bin 0 -> 1599 bytes .../cctz/testdata/zoneinfo/Europe/Istanbul | Bin 0 -> 1200 bytes .../cctz/testdata/zoneinfo/Europe/Jersey | Bin 0 -> 1611 bytes .../cctz/testdata/zoneinfo/Europe/Kaliningrad | Bin 0 -> 904 bytes .../cctz/testdata/zoneinfo/Europe/Kiev | Bin 0 -> 558 bytes .../cctz/testdata/zoneinfo/Europe/Kirov | Bin 0 -> 735 bytes .../cctz/testdata/zoneinfo/Europe/Kyiv | Bin 0 -> 558 bytes .../cctz/testdata/zoneinfo/Europe/Lisbon | Bin 0 -> 1454 bytes .../cctz/testdata/zoneinfo/Europe/Ljubljana | Bin 0 -> 478 bytes .../cctz/testdata/zoneinfo/Europe/London | Bin 0 -> 1599 bytes .../cctz/testdata/zoneinfo/Europe/Luxembourg | Bin 0 -> 1087 bytes .../cctz/testdata/zoneinfo/Europe/Madrid | Bin 0 -> 897 bytes .../cctz/testdata/zoneinfo/Europe/Malta | Bin 0 -> 928 bytes .../cctz/testdata/zoneinfo/Europe/Mariehamn | Bin 0 -> 481 bytes .../cctz/testdata/zoneinfo/Europe/Minsk | Bin 0 -> 808 bytes .../cctz/testdata/zoneinfo/Europe/Monaco | Bin 0 -> 1114 bytes 
.../cctz/testdata/zoneinfo/Europe/Moscow | Bin 0 -> 908 bytes .../cctz/testdata/zoneinfo/Europe/Nicosia | Bin 0 -> 597 bytes .../cctz/testdata/zoneinfo/Europe/Oslo | Bin 0 -> 676 bytes .../cctz/testdata/zoneinfo/Europe/Paris | Bin 0 -> 1105 bytes .../cctz/testdata/zoneinfo/Europe/Podgorica | Bin 0 -> 478 bytes .../cctz/testdata/zoneinfo/Europe/Prague | Bin 0 -> 723 bytes .../cctz/testdata/zoneinfo/Europe/Riga | Bin 0 -> 694 bytes .../cctz/testdata/zoneinfo/Europe/Rome | Bin 0 -> 947 bytes .../cctz/testdata/zoneinfo/Europe/Samara | Bin 0 -> 732 bytes .../cctz/testdata/zoneinfo/Europe/San_Marino | Bin 0 -> 947 bytes .../cctz/testdata/zoneinfo/Europe/Sarajevo | Bin 0 -> 478 bytes .../cctz/testdata/zoneinfo/Europe/Saratov | Bin 0 -> 726 bytes .../cctz/testdata/zoneinfo/Europe/Simferopol | Bin 0 -> 865 bytes .../cctz/testdata/zoneinfo/Europe/Skopje | Bin 0 -> 478 bytes .../cctz/testdata/zoneinfo/Europe/Sofia | Bin 0 -> 592 bytes .../cctz/testdata/zoneinfo/Europe/Stockholm | Bin 0 -> 497 bytes .../cctz/testdata/zoneinfo/Europe/Tallinn | Bin 0 -> 675 bytes .../cctz/testdata/zoneinfo/Europe/Tirane | Bin 0 -> 604 bytes .../cctz/testdata/zoneinfo/Europe/Tiraspol | Bin 0 -> 755 bytes .../cctz/testdata/zoneinfo/Europe/Ulyanovsk | Bin 0 -> 760 bytes .../cctz/testdata/zoneinfo/Europe/Uzhgorod | Bin 0 -> 558 bytes .../cctz/testdata/zoneinfo/Europe/Vaduz | Bin 0 -> 478 bytes .../cctz/testdata/zoneinfo/Europe/Vatican | Bin 0 -> 947 bytes .../cctz/testdata/zoneinfo/Europe/Vienna | Bin 0 -> 658 bytes .../cctz/testdata/zoneinfo/Europe/Vilnius | Bin 0 -> 676 bytes .../cctz/testdata/zoneinfo/Europe/Volgograd | Bin 0 -> 753 bytes .../cctz/testdata/zoneinfo/Europe/Warsaw | Bin 0 -> 923 bytes .../cctz/testdata/zoneinfo/Europe/Zagreb | Bin 0 -> 478 bytes .../cctz/testdata/zoneinfo/Europe/Zaporozhye | Bin 0 -> 558 bytes .../cctz/testdata/zoneinfo/Europe/Zurich | Bin 0 -> 497 bytes .../internal/cctz/testdata/zoneinfo/Factory | Bin 0 -> 113 bytes .../time/internal/cctz/testdata/zoneinfo/GB | Bin 0 -> 1599 bytes .../internal/cctz/testdata/zoneinfo/GB-Eire | Bin 0 -> 1599 bytes .../time/internal/cctz/testdata/zoneinfo/GMT | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/GMT+0 | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/GMT-0 | Bin 0 -> 111 bytes .../time/internal/cctz/testdata/zoneinfo/GMT0 | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Greenwich | Bin 0 -> 111 bytes .../time/internal/cctz/testdata/zoneinfo/HST | Bin 0 -> 112 bytes .../internal/cctz/testdata/zoneinfo/Hongkong | Bin 0 -> 775 bytes .../internal/cctz/testdata/zoneinfo/Iceland | Bin 0 -> 753 bytes .../testdata/zoneinfo/Indian/Antananarivo | Bin 0 -> 160 bytes .../cctz/testdata/zoneinfo/Indian/Chagos | Bin 0 -> 152 bytes .../cctz/testdata/zoneinfo/Indian/Christmas | Bin 0 -> 133 bytes .../cctz/testdata/zoneinfo/Indian/Cocos | Bin 0 -> 140 bytes .../cctz/testdata/zoneinfo/Indian/Comoro | Bin 0 -> 131 bytes .../cctz/testdata/zoneinfo/Indian/Kerguelen | Bin 0 -> 133 bytes .../cctz/testdata/zoneinfo/Indian/Mahe | Bin 0 -> 133 bytes .../cctz/testdata/zoneinfo/Indian/Maldives | Bin 0 -> 152 bytes .../cctz/testdata/zoneinfo/Indian/Mauritius | Bin 0 -> 179 bytes .../cctz/testdata/zoneinfo/Indian/Mayotte | Bin 0 -> 131 bytes .../cctz/testdata/zoneinfo/Indian/Reunion | Bin 0 -> 133 bytes .../time/internal/cctz/testdata/zoneinfo/Iran | Bin 0 -> 812 bytes .../internal/cctz/testdata/zoneinfo/Israel | Bin 0 -> 1074 bytes .../internal/cctz/testdata/zoneinfo/Jamaica | Bin 0 -> 339 bytes .../internal/cctz/testdata/zoneinfo/Japan | Bin 0 -> 213 
bytes .../internal/cctz/testdata/zoneinfo/Kwajalein | Bin 0 -> 219 bytes .../internal/cctz/testdata/zoneinfo/Libya | Bin 0 -> 431 bytes .../time/internal/cctz/testdata/zoneinfo/MET | Bin 0 -> 621 bytes .../time/internal/cctz/testdata/zoneinfo/MST | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/MST7MDT | Bin 0 -> 951 bytes .../cctz/testdata/zoneinfo/Mexico/BajaNorte | Bin 0 -> 1025 bytes .../cctz/testdata/zoneinfo/Mexico/BajaSur | Bin 0 -> 718 bytes .../cctz/testdata/zoneinfo/Mexico/General | Bin 0 -> 773 bytes .../time/internal/cctz/testdata/zoneinfo/NZ | Bin 0 -> 1043 bytes .../internal/cctz/testdata/zoneinfo/NZ-CHAT | Bin 0 -> 808 bytes .../internal/cctz/testdata/zoneinfo/Navajo | Bin 0 -> 1042 bytes .../time/internal/cctz/testdata/zoneinfo/PRC | Bin 0 -> 393 bytes .../internal/cctz/testdata/zoneinfo/PST8PDT | Bin 0 -> 951 bytes .../cctz/testdata/zoneinfo/Pacific/Apia | Bin 0 -> 407 bytes .../cctz/testdata/zoneinfo/Pacific/Auckland | Bin 0 -> 1043 bytes .../testdata/zoneinfo/Pacific/Bougainville | Bin 0 -> 201 bytes .../cctz/testdata/zoneinfo/Pacific/Chatham | Bin 0 -> 808 bytes .../cctz/testdata/zoneinfo/Pacific/Chuuk | Bin 0 -> 195 bytes .../cctz/testdata/zoneinfo/Pacific/Easter | Bin 0 -> 1174 bytes .../cctz/testdata/zoneinfo/Pacific/Efate | Bin 0 -> 342 bytes .../cctz/testdata/zoneinfo/Pacific/Enderbury | Bin 0 -> 172 bytes .../cctz/testdata/zoneinfo/Pacific/Fakaofo | Bin 0 -> 153 bytes .../cctz/testdata/zoneinfo/Pacific/Fiji | Bin 0 -> 396 bytes .../cctz/testdata/zoneinfo/Pacific/Funafuti | Bin 0 -> 134 bytes .../cctz/testdata/zoneinfo/Pacific/Galapagos | Bin 0 -> 175 bytes .../cctz/testdata/zoneinfo/Pacific/Gambier | Bin 0 -> 132 bytes .../testdata/zoneinfo/Pacific/Guadalcanal | Bin 0 -> 134 bytes .../cctz/testdata/zoneinfo/Pacific/Guam | Bin 0 -> 350 bytes .../cctz/testdata/zoneinfo/Pacific/Honolulu | Bin 0 -> 221 bytes .../cctz/testdata/zoneinfo/Pacific/Johnston | Bin 0 -> 221 bytes .../cctz/testdata/zoneinfo/Pacific/Kanton | Bin 0 -> 172 bytes .../cctz/testdata/zoneinfo/Pacific/Kiritimati | Bin 0 -> 174 bytes .../cctz/testdata/zoneinfo/Pacific/Kosrae | Bin 0 -> 242 bytes .../cctz/testdata/zoneinfo/Pacific/Kwajalein | Bin 0 -> 219 bytes .../cctz/testdata/zoneinfo/Pacific/Majuro | Bin 0 -> 218 bytes .../cctz/testdata/zoneinfo/Pacific/Marquesas | Bin 0 -> 139 bytes .../cctz/testdata/zoneinfo/Pacific/Midway | Bin 0 -> 169 bytes .../cctz/testdata/zoneinfo/Pacific/Nauru | Bin 0 -> 183 bytes .../cctz/testdata/zoneinfo/Pacific/Niue | Bin 0 -> 154 bytes .../cctz/testdata/zoneinfo/Pacific/Norfolk | Bin 0 -> 237 bytes .../cctz/testdata/zoneinfo/Pacific/Noumea | Bin 0 -> 198 bytes .../cctz/testdata/zoneinfo/Pacific/Pago_Pago | Bin 0 -> 146 bytes .../cctz/testdata/zoneinfo/Pacific/Palau | Bin 0 -> 148 bytes .../cctz/testdata/zoneinfo/Pacific/Pitcairn | Bin 0 -> 153 bytes .../cctz/testdata/zoneinfo/Pacific/Pohnpei | Bin 0 -> 214 bytes .../cctz/testdata/zoneinfo/Pacific/Ponape | Bin 0 -> 214 bytes .../testdata/zoneinfo/Pacific/Port_Moresby | Bin 0 -> 154 bytes .../cctz/testdata/zoneinfo/Pacific/Rarotonga | Bin 0 -> 406 bytes .../cctz/testdata/zoneinfo/Pacific/Saipan | Bin 0 -> 341 bytes .../cctz/testdata/zoneinfo/Pacific/Samoa | Bin 0 -> 146 bytes .../cctz/testdata/zoneinfo/Pacific/Tahiti | Bin 0 -> 133 bytes .../cctz/testdata/zoneinfo/Pacific/Tarawa | Bin 0 -> 134 bytes .../cctz/testdata/zoneinfo/Pacific/Tongatapu | Bin 0 -> 237 bytes .../cctz/testdata/zoneinfo/Pacific/Truk | Bin 0 -> 195 bytes .../cctz/testdata/zoneinfo/Pacific/Wake | Bin 0 -> 134 bytes 
.../cctz/testdata/zoneinfo/Pacific/Wallis | Bin 0 -> 134 bytes .../cctz/testdata/zoneinfo/Pacific/Yap | Bin 0 -> 195 bytes .../internal/cctz/testdata/zoneinfo/Poland | Bin 0 -> 923 bytes .../internal/cctz/testdata/zoneinfo/Portugal | Bin 0 -> 1454 bytes .../time/internal/cctz/testdata/zoneinfo/ROC | Bin 0 -> 511 bytes .../time/internal/cctz/testdata/zoneinfo/ROK | Bin 0 -> 415 bytes .../internal/cctz/testdata/zoneinfo/Singapore | Bin 0 -> 256 bytes .../internal/cctz/testdata/zoneinfo/Turkey | Bin 0 -> 1200 bytes .../time/internal/cctz/testdata/zoneinfo/UCT | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/US/Alaska | Bin 0 -> 977 bytes .../cctz/testdata/zoneinfo/US/Aleutian | Bin 0 -> 969 bytes .../cctz/testdata/zoneinfo/US/Arizona | Bin 0 -> 240 bytes .../cctz/testdata/zoneinfo/US/Central | Bin 0 -> 1754 bytes .../cctz/testdata/zoneinfo/US/East-Indiana | Bin 0 -> 531 bytes .../cctz/testdata/zoneinfo/US/Eastern | Bin 0 -> 1744 bytes .../internal/cctz/testdata/zoneinfo/US/Hawaii | Bin 0 -> 221 bytes .../cctz/testdata/zoneinfo/US/Indiana-Starke | Bin 0 -> 1016 bytes .../cctz/testdata/zoneinfo/US/Michigan | Bin 0 -> 899 bytes .../cctz/testdata/zoneinfo/US/Mountain | Bin 0 -> 1042 bytes .../cctz/testdata/zoneinfo/US/Pacific | Bin 0 -> 1294 bytes .../internal/cctz/testdata/zoneinfo/US/Samoa | Bin 0 -> 146 bytes .../time/internal/cctz/testdata/zoneinfo/UTC | Bin 0 -> 111 bytes .../internal/cctz/testdata/zoneinfo/Universal | Bin 0 -> 111 bytes .../time/internal/cctz/testdata/zoneinfo/W-SU | Bin 0 -> 908 bytes .../time/internal/cctz/testdata/zoneinfo/WET | Bin 0 -> 494 bytes .../time/internal/cctz/testdata/zoneinfo/Zulu | Bin 0 -> 111 bytes .../cctz/testdata/zoneinfo/iso3166.tab | 279 ++ .../cctz/testdata/zoneinfo/zone1970.tab | 375 ++ .../cctz/testdata/zoneinfo/zonenow.tab | 301 ++ .../time/internal/get_current_time_chrono.inc | 31 + .../time/internal/get_current_time_posix.inc | 24 + .../absl/absl/time/internal/test_util.cc | 32 + .../absl/absl/time/internal/test_util.h | 33 + third_party/absl/absl/time/time.cc | 507 +++ third_party/absl/absl/time/time.h | 1815 +++++++++ third_party/absl/absl/time/time_benchmark.cc | 321 ++ third_party/absl/absl/time/time_test.cc | 1288 ++++++ third_party/absl/absl/time/time_zone_test.cc | 97 + third_party/absl/absl/types/BUILD.bazel | 317 ++ third_party/absl/absl/types/CMakeLists.txt | 321 ++ third_party/absl/absl/types/any.h | 519 +++ .../absl/types/any_exception_safety_test.cc | 173 + third_party/absl/absl/types/any_test.cc | 778 ++++ third_party/absl/absl/types/bad_any_cast.cc | 64 + third_party/absl/absl/types/bad_any_cast.h | 75 + .../absl/absl/types/bad_optional_access.cc | 66 + .../absl/absl/types/bad_optional_access.h | 78 + .../absl/absl/types/bad_variant_access.cc | 82 + .../absl/absl/types/bad_variant_access.h | 82 + third_party/absl/absl/types/compare.h | 505 +++ third_party/absl/absl/types/compare_test.cc | 300 ++ .../absl/absl/types/internal/optional.h | 352 ++ third_party/absl/absl/types/internal/span.h | 139 + .../absl/absl/types/internal/variant.h | 1634 ++++++++ third_party/absl/absl/types/optional.h | 782 ++++ .../types/optional_exception_safety_test.cc | 292 ++ third_party/absl/absl/types/optional_test.cc | 1658 ++++++++ third_party/absl/absl/types/span.h | 753 ++++ third_party/absl/absl/types/span_test.cc | 848 ++++ third_party/absl/absl/types/variant.h | 866 +++++ .../absl/absl/types/variant_benchmark.cc | 222 ++ .../types/variant_exception_safety_test.cc | 532 +++ third_party/absl/absl/types/variant_test.cc | 2718 +++++++++++++ 
third_party/absl/absl/utility/BUILD.bazel | 86 +
third_party/absl/absl/utility/CMakeLists.txt | 68 +
.../absl/absl/utility/internal/if_constexpr.h | 70 +
.../utility/internal/if_constexpr_test.cc | 79 +
third_party/absl/absl/utility/utility.h | 268 ++
third_party/absl/absl/utility/utility_test.cc | 247 ++
third_party/absl/ci/absl_alternate_options.h | 30 +
third_party/absl/ci/cmake_common.sh | 29 +
third_party/absl/ci/cmake_install_test.sh | 58 +
.../ci/linux_arm_clang-latest_libcxx_bazel.sh | 100 +
.../linux_clang-latest_libcxx_asan_bazel.sh | 102 +
.../ci/linux_clang-latest_libcxx_bazel.sh | 100 +
.../linux_clang-latest_libcxx_tsan_bazel.sh | 97 +
.../ci/linux_clang-latest_libstdcxx_bazel.sh | 95 +
.../absl/ci/linux_docker_containers.sh | 22 +
.../ci/linux_gcc-floor_libstdcxx_bazel.sh | 95 +
.../ci/linux_gcc-latest_libstdcxx_bazel.sh | 98 +
.../ci/linux_gcc-latest_libstdcxx_cmake.sh | 66 +
third_party/absl/ci/linux_gcc_alpine_cmake.sh | 65 +
third_party/absl/ci/macos_xcode_bazel.sh | 66 +
third_party/absl/ci/macos_xcode_cmake.sh | 57 +
third_party/absl/ci/windows_clangcl_bazel.bat | 61 +
third_party/absl/ci/windows_msvc_bazel.bat | 52 +
third_party/absl/ci/windows_msvc_cmake.bat | 74 +
third_party/absl/conanfile.py | 51 +
third_party/absl/create_lts.py | 138 +
1523 files changed, 276788 insertions(+)

create mode 100644 CMake/README.md
create mode 100644 CMake/absl.cmake
create mode 100644 third_party/.clang-format
create mode 100644 third_party/README.md
create mode 100644 third_party/absl/.clang-format
create mode 100644 third_party/absl/.github/ISSUE_TEMPLATE/00-bug_report.yml
create mode 100644 third_party/absl/.github/ISSUE_TEMPLATE/config.yml
create mode 100644 third_party/absl/.github/PULL_REQUEST_TEMPLATE.md
create mode 100644 third_party/absl/.gitignore
create mode 100644 third_party/absl/ABSEIL_ISSUE_TEMPLATE.md
create mode 100644 third_party/absl/AUTHORS
create mode 100644 third_party/absl/BUILD.bazel
create mode 100644 third_party/absl/CMake/AbseilDll.cmake
create mode 100644 third_party/absl/CMake/AbseilHelpers.cmake
create mode 100644 third_party/absl/CMake/Googletest/CMakeLists.txt.in
create mode 100644 third_party/absl/CMake/Googletest/DownloadGTest.cmake
create mode 100644 third_party/absl/CMake/README.md
create mode 100644 third_party/absl/CMake/abslConfig.cmake.in
create mode 100644 third_party/absl/CMake/install_test_project/CMakeLists.txt
create mode 100644 third_party/absl/CMake/install_test_project/simple.cc
create mode 100755 third_party/absl/CMake/install_test_project/test.sh
create mode 100644 third_party/absl/CMakeLists.txt
create mode 100644 third_party/absl/CONTRIBUTING.md
create mode 100644 third_party/absl/FAQ.md
create mode 100644 third_party/absl/LICENSE
create mode 100644 third_party/absl/MODULE.bazel
create mode 100644 third_party/absl/PrivacyInfo.xcprivacy
create mode 100644 third_party/absl/README.md
create mode 100644 third_party/absl/UPGRADES.md
create mode 100644 third_party/absl/WORKSPACE
create mode 100644 third_party/absl/absl/BUILD.bazel
create mode 100644 third_party/absl/absl/CMakeLists.txt
create mode 100755 third_party/absl/absl/abseil.podspec.gen.py
create mode 100644 third_party/absl/absl/algorithm/BUILD.bazel
create mode 100644 third_party/absl/absl/algorithm/CMakeLists.txt
create mode 100644 third_party/absl/absl/algorithm/algorithm.h
create mode 100644 third_party/absl/absl/algorithm/algorithm_test.cc
create mode 100644 third_party/absl/absl/algorithm/container.h
create mode 100644 third_party/absl/absl/algorithm/container_test.cc
create mode 100644 third_party/absl/absl/base/BUILD.bazel
create mode 100644 third_party/absl/absl/base/CMakeLists.txt
create mode 100644 third_party/absl/absl/base/attributes.h
create mode 100644 third_party/absl/absl/base/bit_cast_test.cc
create mode 100644 third_party/absl/absl/base/call_once.h
create mode 100644 third_party/absl/absl/base/call_once_test.cc
create mode 100644 third_party/absl/absl/base/casts.h
create mode 100644 third_party/absl/absl/base/config.h
create mode 100644 third_party/absl/absl/base/config_test.cc
create mode 100644 third_party/absl/absl/base/const_init.h
create mode 100644 third_party/absl/absl/base/dynamic_annotations.h
create mode 100644 third_party/absl/absl/base/exception_safety_testing_test.cc
create mode 100644 third_party/absl/absl/base/inline_variable_test.cc
create mode 100644 third_party/absl/absl/base/inline_variable_test_a.cc
create mode 100644 third_party/absl/absl/base/inline_variable_test_b.cc
create mode 100644 third_party/absl/absl/base/internal/atomic_hook.h
create mode 100644 third_party/absl/absl/base/internal/atomic_hook_test.cc
create mode 100644 third_party/absl/absl/base/internal/atomic_hook_test_helper.cc
create mode 100644 third_party/absl/absl/base/internal/atomic_hook_test_helper.h
create mode 100644 third_party/absl/absl/base/internal/cmake_thread_test.cc
create mode 100644 third_party/absl/absl/base/internal/cycleclock.cc
create mode 100644 third_party/absl/absl/base/internal/cycleclock.h
create mode 100644 third_party/absl/absl/base/internal/cycleclock_config.h
create mode 100644 third_party/absl/absl/base/internal/direct_mmap.h
create mode 100644 third_party/absl/absl/base/internal/dynamic_annotations.h
create mode 100644 third_party/absl/absl/base/internal/endian.h
create mode 100644 third_party/absl/absl/base/internal/endian_test.cc
create mode 100644 third_party/absl/absl/base/internal/errno_saver.h
create mode 100644 third_party/absl/absl/base/internal/errno_saver_test.cc
create mode 100644 third_party/absl/absl/base/internal/exception_safety_testing.cc
create mode 100644 third_party/absl/absl/base/internal/exception_safety_testing.h
create mode 100644 third_party/absl/absl/base/internal/exception_testing.h
create mode 100644 third_party/absl/absl/base/internal/fast_type_id.h
create mode 100644 third_party/absl/absl/base/internal/fast_type_id_test.cc
create mode 100644 third_party/absl/absl/base/internal/hide_ptr.h
create mode 100644 third_party/absl/absl/base/internal/identity.h
create mode 100644 third_party/absl/absl/base/internal/inline_variable.h
create mode 100644 third_party/absl/absl/base/internal/inline_variable_testing.h
create mode 100644 third_party/absl/absl/base/internal/invoke.h
create mode 100644 third_party/absl/absl/base/internal/low_level_alloc.cc
create mode 100644 third_party/absl/absl/base/internal/low_level_alloc.h
create mode 100644 third_party/absl/absl/base/internal/low_level_alloc_test.cc
create mode 100644 third_party/absl/absl/base/internal/low_level_scheduling.h
create mode 100644 third_party/absl/absl/base/internal/nullability_impl.h
create mode 100644 third_party/absl/absl/base/internal/per_thread_tls.h
create mode 100644 third_party/absl/absl/base/internal/pretty_function.h
create mode 100644 third_party/absl/absl/base/internal/raw_logging.cc
create mode 100644 third_party/absl/absl/base/internal/raw_logging.h
create mode 100644 third_party/absl/absl/base/internal/scheduling_mode.h
create mode 100644 third_party/absl/absl/base/internal/scoped_set_env.cc
create mode 100644 third_party/absl/absl/base/internal/scoped_set_env.h
create mode 100644 third_party/absl/absl/base/internal/scoped_set_env_test.cc
create mode 100644 third_party/absl/absl/base/internal/spinlock.cc
create mode 100644 third_party/absl/absl/base/internal/spinlock.h
create mode 100644 third_party/absl/absl/base/internal/spinlock_akaros.inc
create mode 100644 third_party/absl/absl/base/internal/spinlock_benchmark.cc
create mode 100644 third_party/absl/absl/base/internal/spinlock_linux.inc
create mode 100644 third_party/absl/absl/base/internal/spinlock_posix.inc
create mode 100644 third_party/absl/absl/base/internal/spinlock_wait.cc
create mode 100644 third_party/absl/absl/base/internal/spinlock_wait.h
create mode 100644 third_party/absl/absl/base/internal/spinlock_win32.inc
create mode 100644 third_party/absl/absl/base/internal/strerror.cc
create mode 100644 third_party/absl/absl/base/internal/strerror.h
create mode 100644 third_party/absl/absl/base/internal/strerror_benchmark.cc
create mode 100644 third_party/absl/absl/base/internal/strerror_test.cc
create mode 100644 third_party/absl/absl/base/internal/sysinfo.cc
create mode 100644 third_party/absl/absl/base/internal/sysinfo.h
create mode 100644 third_party/absl/absl/base/internal/sysinfo_test.cc
create mode 100644 third_party/absl/absl/base/internal/thread_identity.cc
create mode 100644 third_party/absl/absl/base/internal/thread_identity.h
create mode 100644 third_party/absl/absl/base/internal/thread_identity_benchmark.cc
create mode 100644 third_party/absl/absl/base/internal/thread_identity_test.cc
create mode 100644 third_party/absl/absl/base/internal/throw_delegate.cc
create mode 100644 third_party/absl/absl/base/internal/throw_delegate.h
create mode 100644 third_party/absl/absl/base/internal/tsan_mutex_interface.h
create mode 100644 third_party/absl/absl/base/internal/unaligned_access.h
create mode 100644 third_party/absl/absl/base/internal/unique_small_name_test.cc
create mode 100644 third_party/absl/absl/base/internal/unscaledcycleclock.cc
create mode 100644 third_party/absl/absl/base/internal/unscaledcycleclock.h
create mode 100644 third_party/absl/absl/base/internal/unscaledcycleclock_config.h
create mode 100644 third_party/absl/absl/base/invoke_test.cc
create mode 100644 third_party/absl/absl/base/log_severity.cc
create mode 100644 third_party/absl/absl/base/log_severity.h
create mode 100644 third_party/absl/absl/base/log_severity_test.cc
create mode 100644 third_party/absl/absl/base/macros.h
create mode 100644 third_party/absl/absl/base/no_destructor.h
create mode 100644 third_party/absl/absl/base/no_destructor_benchmark.cc
create mode 100644 third_party/absl/absl/base/no_destructor_test.cc
create mode 100644 third_party/absl/absl/base/nullability.h
create mode 100644 third_party/absl/absl/base/nullability_test.cc
create mode 100644 third_party/absl/absl/base/optimization.h
create mode 100644 third_party/absl/absl/base/optimization_test.cc
create mode 100644 third_party/absl/absl/base/options.h
create mode 100644 third_party/absl/absl/base/policy_checks.h
create mode 100644 third_party/absl/absl/base/port.h
create mode 100644 third_party/absl/absl/base/prefetch.h
create mode 100644 third_party/absl/absl/base/prefetch_test.cc
create mode 100644 third_party/absl/absl/base/raw_logging_test.cc
create mode 100644 third_party/absl/absl/base/spinlock_test_common.cc
create mode 100644 third_party/absl/absl/base/thread_annotations.h
create mode 100644 third_party/absl/absl/base/throw_delegate_test.cc
create mode 100644 third_party/absl/absl/cleanup/BUILD.bazel
create mode 100644 third_party/absl/absl/cleanup/CMakeLists.txt
create mode 100644 third_party/absl/absl/cleanup/cleanup.h
create mode 100644 third_party/absl/absl/cleanup/cleanup_test.cc
create mode 100644 third_party/absl/absl/cleanup/internal/cleanup.h
create mode 100644 third_party/absl/absl/container/BUILD.bazel
create mode 100644 third_party/absl/absl/container/CMakeLists.txt
create mode 100644 third_party/absl/absl/container/btree_benchmark.cc
create mode 100644 third_party/absl/absl/container/btree_map.h
create mode 100644 third_party/absl/absl/container/btree_set.h
create mode 100644 third_party/absl/absl/container/btree_test.cc
create mode 100644 third_party/absl/absl/container/btree_test.h
create mode 100644 third_party/absl/absl/container/fixed_array.h
create mode 100644 third_party/absl/absl/container/fixed_array_benchmark.cc
create mode 100644 third_party/absl/absl/container/fixed_array_exception_safety_test.cc
create mode 100644 third_party/absl/absl/container/fixed_array_test.cc
create mode 100644 third_party/absl/absl/container/flat_hash_map.h
create mode 100644 third_party/absl/absl/container/flat_hash_map_test.cc
create mode 100644 third_party/absl/absl/container/flat_hash_set.h
create mode 100644 third_party/absl/absl/container/flat_hash_set_test.cc
create mode 100644 third_party/absl/absl/container/inlined_vector.h
create mode 100644 third_party/absl/absl/container/inlined_vector_benchmark.cc
create mode 100644 third_party/absl/absl/container/inlined_vector_exception_safety_test.cc
create mode 100644 third_party/absl/absl/container/inlined_vector_test.cc
create mode 100644 third_party/absl/absl/container/internal/btree.h
create mode 100644 third_party/absl/absl/container/internal/btree_container.h
create mode 100644 third_party/absl/absl/container/internal/common.h
create mode 100644 third_party/absl/absl/container/internal/common_policy_traits.h
create mode 100644 third_party/absl/absl/container/internal/common_policy_traits_test.cc
create mode 100644 third_party/absl/absl/container/internal/compressed_tuple.h
create mode 100644 third_party/absl/absl/container/internal/compressed_tuple_test.cc
create mode 100644 third_party/absl/absl/container/internal/container_memory.h
create mode 100644 third_party/absl/absl/container/internal/container_memory_test.cc
create mode 100644 third_party/absl/absl/container/internal/hash_function_defaults.h
create mode 100644 third_party/absl/absl/container/internal/hash_function_defaults_test.cc
create mode 100644 third_party/absl/absl/container/internal/hash_generator_testing.cc
create mode 100644 third_party/absl/absl/container/internal/hash_generator_testing.h
create mode 100644 third_party/absl/absl/container/internal/hash_policy_testing.h
create mode 100644 third_party/absl/absl/container/internal/hash_policy_testing_test.cc
create mode 100644 third_party/absl/absl/container/internal/hash_policy_traits.h
create mode 100644 third_party/absl/absl/container/internal/hash_policy_traits_test.cc
create mode 100644 third_party/absl/absl/container/internal/hashtable_debug.h
create mode 100644 third_party/absl/absl/container/internal/hashtable_debug_hooks.h
create mode 100644 third_party/absl/absl/container/internal/hashtablez_sampler.cc
create mode 100644 third_party/absl/absl/container/internal/hashtablez_sampler.h
create mode 100644 third_party/absl/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
create mode 100644 third_party/absl/absl/container/internal/hashtablez_sampler_test.cc
create mode 100644 third_party/absl/absl/container/internal/inlined_vector.h
create mode 100644 third_party/absl/absl/container/internal/layout.h
create mode 100644 third_party/absl/absl/container/internal/layout_benchmark.cc
create mode 100644 third_party/absl/absl/container/internal/layout_test.cc
create mode 100644 third_party/absl/absl/container/internal/node_slot_policy.h
create mode 100644 third_party/absl/absl/container/internal/node_slot_policy_test.cc
create mode 100644 third_party/absl/absl/container/internal/raw_hash_map.h
create mode 100644 third_party/absl/absl/container/internal/raw_hash_set.cc
create mode 100644 third_party/absl/absl/container/internal/raw_hash_set.h
create mode 100644 third_party/absl/absl/container/internal/raw_hash_set_allocator_test.cc
create mode 100644 third_party/absl/absl/container/internal/raw_hash_set_benchmark.cc
create mode 100644 third_party/absl/absl/container/internal/raw_hash_set_probe_benchmark.cc
create mode 100644 third_party/absl/absl/container/internal/raw_hash_set_test.cc
create mode 100644 third_party/absl/absl/container/internal/test_allocator.h
create mode 100644 third_party/absl/absl/container/internal/test_instance_tracker.cc
create mode 100644 third_party/absl/absl/container/internal/test_instance_tracker.h
create mode 100644 third_party/absl/absl/container/internal/test_instance_tracker_test.cc
create mode 100644 third_party/absl/absl/container/internal/tracked.h
create mode 100644 third_party/absl/absl/container/internal/unordered_map_constructor_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_map_lookup_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_map_members_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_map_modifiers_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_map_test.cc
create mode 100644 third_party/absl/absl/container/internal/unordered_set_constructor_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_set_lookup_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_set_members_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_set_modifiers_test.h
create mode 100644 third_party/absl/absl/container/internal/unordered_set_test.cc
create mode 100644 third_party/absl/absl/container/node_hash_map.h
create mode 100644 third_party/absl/absl/container/node_hash_map_test.cc
create mode 100644 third_party/absl/absl/container/node_hash_set.h
create mode 100644 third_party/absl/absl/container/node_hash_set_test.cc
create mode 100644 third_party/absl/absl/container/sample_element_size_test.cc
create mode 100644 third_party/absl/absl/copts/AbseilConfigureCopts.cmake
create mode 100644 third_party/absl/absl/copts/GENERATED_AbseilCopts.cmake
create mode 100644 third_party/absl/absl/copts/GENERATED_copts.bzl
create mode 100644 third_party/absl/absl/copts/configure_copts.bzl
create mode 100644 third_party/absl/absl/copts/copts.py
create mode 100755 third_party/absl/absl/copts/generate_copts.py
create mode 100644 third_party/absl/absl/crc/BUILD.bazel
create mode 100644 third_party/absl/absl/crc/CMakeLists.txt
create mode 100644 third_party/absl/absl/crc/crc32c.cc
create mode 100644 third_party/absl/absl/crc/crc32c.h
create mode 100644 third_party/absl/absl/crc/crc32c_benchmark.cc
create mode 100644 third_party/absl/absl/crc/crc32c_test.cc
create mode 100644 third_party/absl/absl/crc/internal/cpu_detect.cc
create mode 100644 third_party/absl/absl/crc/internal/cpu_detect.h
create mode 100644 third_party/absl/absl/crc/internal/crc.cc
create mode 100644 third_party/absl/absl/crc/internal/crc.h
create mode 100644 third_party/absl/absl/crc/internal/crc32_x86_arm_combined_simd.h
create mode 100644 third_party/absl/absl/crc/internal/crc32c.h
create mode 100644 third_party/absl/absl/crc/internal/crc32c_inline.h
create mode 100644 third_party/absl/absl/crc/internal/crc_cord_state.cc
create mode 100644 third_party/absl/absl/crc/internal/crc_cord_state.h
create mode 100644 third_party/absl/absl/crc/internal/crc_cord_state_test.cc
create mode 100644 third_party/absl/absl/crc/internal/crc_internal.h
create mode 100644 third_party/absl/absl/crc/internal/crc_memcpy.h
create mode 100644 third_party/absl/absl/crc/internal/crc_memcpy_fallback.cc
create mode 100644 third_party/absl/absl/crc/internal/crc_memcpy_test.cc
create mode 100644 third_party/absl/absl/crc/internal/crc_memcpy_x86_arm_combined.cc
create mode 100644 third_party/absl/absl/crc/internal/crc_non_temporal_memcpy.cc
create mode 100644 third_party/absl/absl/crc/internal/crc_x86_arm_combined.cc
create mode 100644 third_party/absl/absl/crc/internal/non_temporal_arm_intrinsics.h
create mode 100644 third_party/absl/absl/crc/internal/non_temporal_memcpy.h
create mode 100644 third_party/absl/absl/crc/internal/non_temporal_memcpy_test.cc
create mode 100644 third_party/absl/absl/debugging/BUILD.bazel
create mode 100644 third_party/absl/absl/debugging/CMakeLists.txt
create mode 100644 third_party/absl/absl/debugging/failure_signal_handler.cc
create mode 100644 third_party/absl/absl/debugging/failure_signal_handler.h
create mode 100644 third_party/absl/absl/debugging/failure_signal_handler_test.cc
create mode 100644 third_party/absl/absl/debugging/internal/address_is_readable.cc
create mode 100644 third_party/absl/absl/debugging/internal/address_is_readable.h
create mode 100644 third_party/absl/absl/debugging/internal/demangle.cc
create mode 100644 third_party/absl/absl/debugging/internal/demangle.h
create mode 100644 third_party/absl/absl/debugging/internal/demangle_test.cc
create mode 100644 third_party/absl/absl/debugging/internal/elf_mem_image.cc
create mode 100644 third_party/absl/absl/debugging/internal/elf_mem_image.h
create mode 100644 third_party/absl/absl/debugging/internal/examine_stack.cc
create mode 100644 third_party/absl/absl/debugging/internal/examine_stack.h
create mode 100644 third_party/absl/absl/debugging/internal/stack_consumption.cc
create mode 100644 third_party/absl/absl/debugging/internal/stack_consumption.h
create mode 100644 third_party/absl/absl/debugging/internal/stack_consumption_test.cc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_aarch64-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_arm-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_config.h
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_emscripten-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_generic-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_powerpc-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_riscv-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_unimplemented-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_win32-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/stacktrace_x86-inl.inc
create mode 100644 third_party/absl/absl/debugging/internal/symbolize.h
create mode 100644 third_party/absl/absl/debugging/internal/vdso_support.cc
create mode 100644 third_party/absl/absl/debugging/internal/vdso_support.h
create mode 100644 third_party/absl/absl/debugging/leak_check.cc
create mode 100644 third_party/absl/absl/debugging/leak_check.h
create mode 100644 third_party/absl/absl/debugging/leak_check_fail_test.cc
create mode 100644 third_party/absl/absl/debugging/leak_check_test.cc
create mode 100644 third_party/absl/absl/debugging/stacktrace.cc
create mode 100644 third_party/absl/absl/debugging/stacktrace.h
create mode 100644 third_party/absl/absl/debugging/stacktrace_benchmark.cc
create mode 100644 third_party/absl/absl/debugging/stacktrace_test.cc
create mode 100644 third_party/absl/absl/debugging/symbolize.cc
create mode 100644 third_party/absl/absl/debugging/symbolize.h
create mode 100644 third_party/absl/absl/debugging/symbolize_darwin.inc
create mode 100644 third_party/absl/absl/debugging/symbolize_elf.inc
create mode 100644 third_party/absl/absl/debugging/symbolize_emscripten.inc
create mode 100644 third_party/absl/absl/debugging/symbolize_test.cc
create mode 100644 third_party/absl/absl/debugging/symbolize_unimplemented.inc
create mode 100644 third_party/absl/absl/debugging/symbolize_win32.inc
create mode 100644 third_party/absl/absl/flags/BUILD.bazel
create mode 100644 third_party/absl/absl/flags/CMakeLists.txt
create mode 100644 third_party/absl/absl/flags/commandlineflag.cc
create mode 100644 third_party/absl/absl/flags/commandlineflag.h
create mode 100644 third_party/absl/absl/flags/commandlineflag_test.cc
create mode 100644 third_party/absl/absl/flags/config.h
create mode 100644 third_party/absl/absl/flags/config_test.cc
create mode 100644 third_party/absl/absl/flags/declare.h
create mode 100644 third_party/absl/absl/flags/flag.h
create mode 100644 third_party/absl/absl/flags/flag_benchmark.cc
create mode 100644 third_party/absl/absl/flags/flag_benchmark.lds
create mode 100644 third_party/absl/absl/flags/flag_test.cc
create mode 100644 third_party/absl/absl/flags/flag_test_defs.cc
create mode 100644 third_party/absl/absl/flags/internal/commandlineflag.cc
create mode 100644 third_party/absl/absl/flags/internal/commandlineflag.h
create mode 100644 third_party/absl/absl/flags/internal/flag.cc
create mode 100644 third_party/absl/absl/flags/internal/flag.h
create mode 100644 third_party/absl/absl/flags/internal/parse.h
create mode 100644 third_party/absl/absl/flags/internal/path_util.h
create mode 100644 third_party/absl/absl/flags/internal/path_util_test.cc
create mode 100644 third_party/absl/absl/flags/internal/private_handle_accessor.cc
create mode 100644 third_party/absl/absl/flags/internal/private_handle_accessor.h
create mode 100644 third_party/absl/absl/flags/internal/program_name.cc
create mode 100644 third_party/absl/absl/flags/internal/program_name.h
create mode 100644 third_party/absl/absl/flags/internal/program_name_test.cc
create mode 100644 third_party/absl/absl/flags/internal/registry.h
create mode 100644 third_party/absl/absl/flags/internal/sequence_lock.h
create mode 100644 third_party/absl/absl/flags/internal/sequence_lock_test.cc
create mode 100644 third_party/absl/absl/flags/internal/usage.cc
create mode 100644 third_party/absl/absl/flags/internal/usage.h
create mode 100644 third_party/absl/absl/flags/internal/usage_test.cc
create mode 100644 third_party/absl/absl/flags/marshalling.cc
create mode 100644 third_party/absl/absl/flags/marshalling.h
create mode 100644 third_party/absl/absl/flags/marshalling_test.cc
create mode 100644 third_party/absl/absl/flags/parse.cc
create mode 100644 third_party/absl/absl/flags/parse.h
create mode 100644 third_party/absl/absl/flags/parse_test.cc
create mode 100644 third_party/absl/absl/flags/reflection.cc
create mode 100644 third_party/absl/absl/flags/reflection.h
create mode 100644 third_party/absl/absl/flags/reflection_test.cc
create mode 100644 third_party/absl/absl/flags/usage.cc
create mode 100644 third_party/absl/absl/flags/usage.h
create mode 100644 third_party/absl/absl/flags/usage_config.cc
create mode 100644 third_party/absl/absl/flags/usage_config.h
create mode 100644 third_party/absl/absl/flags/usage_config_test.cc
create mode 100644 third_party/absl/absl/functional/BUILD.bazel
create mode 100644 third_party/absl/absl/functional/CMakeLists.txt
create mode 100644 third_party/absl/absl/functional/any_invocable.h
create mode 100644 third_party/absl/absl/functional/any_invocable_test.cc
create mode 100644 third_party/absl/absl/functional/bind_front.h
create mode 100644 third_party/absl/absl/functional/bind_front_test.cc
create mode 100644 third_party/absl/absl/functional/function_ref.h
create mode 100644 third_party/absl/absl/functional/function_ref_test.cc
create mode 100644 third_party/absl/absl/functional/function_type_benchmark.cc
create mode 100644 third_party/absl/absl/functional/internal/any_invocable.h
create mode 100644 third_party/absl/absl/functional/internal/front_binder.h
create mode 100644 third_party/absl/absl/functional/internal/function_ref.h
create mode 100644 third_party/absl/absl/functional/overload.h
create mode 100644 third_party/absl/absl/functional/overload_test.cc
create mode 100644 third_party/absl/absl/hash/BUILD.bazel
create mode 100644 third_party/absl/absl/hash/CMakeLists.txt
create mode 100644 third_party/absl/absl/hash/hash.h
create mode 100644 third_party/absl/absl/hash/hash_benchmark.cc
create mode 100644 third_party/absl/absl/hash/hash_instantiated_test.cc
create mode 100644 third_party/absl/absl/hash/hash_test.cc
create mode 100644 third_party/absl/absl/hash/hash_testing.h
create mode 100644 third_party/absl/absl/hash/internal/city.cc
create mode 100644 third_party/absl/absl/hash/internal/city.h
create mode 100644 third_party/absl/absl/hash/internal/city_test.cc
create mode 100644 third_party/absl/absl/hash/internal/hash.cc
create mode 100644 third_party/absl/absl/hash/internal/hash.h
create mode 100644 third_party/absl/absl/hash/internal/hash_test.h
create mode 100644 third_party/absl/absl/hash/internal/low_level_hash.cc
create mode 100644 third_party/absl/absl/hash/internal/low_level_hash.h
create mode 100644 third_party/absl/absl/hash/internal/low_level_hash_test.cc
create mode 100644 third_party/absl/absl/hash/internal/print_hash_of.cc
create mode 100644 third_party/absl/absl/hash/internal/spy_hash_state.h
create mode 100644 third_party/absl/absl/log/BUILD.bazel
create mode 100644 third_party/absl/absl/log/CMakeLists.txt
create mode 100644 third_party/absl/absl/log/absl_check.h
create mode 100644 third_party/absl/absl/log/absl_check_test.cc
create mode 100644 third_party/absl/absl/log/absl_log.h
create mode 100644 third_party/absl/absl/log/absl_log_basic_test.cc
create mode 100644 third_party/absl/absl/log/absl_vlog_is_on.h
create mode 100644 third_party/absl/absl/log/check.h
create mode 100644 third_party/absl/absl/log/check_test.cc
create mode 100644 third_party/absl/absl/log/check_test_impl.inc
create mode 100644 third_party/absl/absl/log/die_if_null.cc
create mode 100644 third_party/absl/absl/log/die_if_null.h
create mode 100644 third_party/absl/absl/log/die_if_null_test.cc
create mode 100644 third_party/absl/absl/log/flags.cc
create mode 100644 third_party/absl/absl/log/flags.h
create mode 100644 third_party/absl/absl/log/flags_test.cc
create mode 100644 third_party/absl/absl/log/globals.cc
create mode 100644 third_party/absl/absl/log/globals.h
create mode 100644 third_party/absl/absl/log/globals_test.cc
create mode 100644 third_party/absl/absl/log/initialize.cc
create mode 100644 third_party/absl/absl/log/initialize.h
create mode 100644 third_party/absl/absl/log/internal/BUILD.bazel
create mode 100644 third_party/absl/absl/log/internal/append_truncated.h
create mode 100644 third_party/absl/absl/log/internal/check_impl.h
create mode 100644 third_party/absl/absl/log/internal/check_op.cc
create mode 100644 third_party/absl/absl/log/internal/check_op.h
create mode 100644 third_party/absl/absl/log/internal/conditions.cc
create mode 100644 third_party/absl/absl/log/internal/conditions.h
create mode 100644 third_party/absl/absl/log/internal/config.h
create mode 100644 third_party/absl/absl/log/internal/flags.h
create mode 100644 third_party/absl/absl/log/internal/fnmatch.cc
create mode 100644 third_party/absl/absl/log/internal/fnmatch.h
create mode 100644 third_party/absl/absl/log/internal/fnmatch_benchmark.cc
create mode 100644 third_party/absl/absl/log/internal/fnmatch_test.cc
create mode 100644 third_party/absl/absl/log/internal/globals.cc
create mode 100644 third_party/absl/absl/log/internal/globals.h
create mode 100644 third_party/absl/absl/log/internal/log_format.cc
create mode 100644 third_party/absl/absl/log/internal/log_format.h
create mode 100644 third_party/absl/absl/log/internal/log_impl.h
create mode 100644 third_party/absl/absl/log/internal/log_message.cc
create mode 100644 third_party/absl/absl/log/internal/log_message.h
create mode 100644 third_party/absl/absl/log/internal/log_sink_set.cc
create mode 100644 third_party/absl/absl/log/internal/log_sink_set.h
create mode 100644 third_party/absl/absl/log/internal/nullguard.cc
create mode 100644 third_party/absl/absl/log/internal/nullguard.h
create mode 100644 third_party/absl/absl/log/internal/nullstream.h
create mode 100644 third_party/absl/absl/log/internal/proto.cc
create mode 100644 third_party/absl/absl/log/internal/proto.h
create mode 100644 third_party/absl/absl/log/internal/stderr_log_sink_test.cc
create mode 100644 third_party/absl/absl/log/internal/strip.h
create mode 100644 third_party/absl/absl/log/internal/structured.h
create mode 100644 third_party/absl/absl/log/internal/test_actions.cc
create mode 100644 third_party/absl/absl/log/internal/test_actions.h
create mode 100644 third_party/absl/absl/log/internal/test_helpers.cc
create mode 100644 third_party/absl/absl/log/internal/test_helpers.h
create mode 100644 third_party/absl/absl/log/internal/test_matchers.cc
create mode 100644 third_party/absl/absl/log/internal/test_matchers.h
create mode 100644 third_party/absl/absl/log/internal/vlog_config.cc
create mode 100644 third_party/absl/absl/log/internal/vlog_config.h
create mode 100644 third_party/absl/absl/log/internal/vlog_config_benchmark.cc
create mode 100644 third_party/absl/absl/log/internal/voidify.h
create mode 100644 third_party/absl/absl/log/log.h
create mode 100644 third_party/absl/absl/log/log_basic_test.cc
create mode 100644 third_party/absl/absl/log/log_basic_test_impl.inc
create mode 100644 third_party/absl/absl/log/log_benchmark.cc
create mode 100644 third_party/absl/absl/log/log_entry.cc
create mode 100644 third_party/absl/absl/log/log_entry.h
create mode 100644 third_party/absl/absl/log/log_entry_test.cc
create mode 100644 third_party/absl/absl/log/log_format_test.cc
create mode 100644 third_party/absl/absl/log/log_macro_hygiene_test.cc
create mode 100644 third_party/absl/absl/log/log_modifier_methods_test.cc
create mode 100644 third_party/absl/absl/log/log_sink.cc
create mode 100644 third_party/absl/absl/log/log_sink.h
create mode 100644 third_party/absl/absl/log/log_sink_registry.h
create mode 100644 third_party/absl/absl/log/log_sink_test.cc
create mode 100644 third_party/absl/absl/log/log_streamer.h
create mode 100644 third_party/absl/absl/log/log_streamer_test.cc
create mode 100644 third_party/absl/absl/log/scoped_mock_log.cc
create mode 100644 third_party/absl/absl/log/scoped_mock_log.h
create mode 100644 third_party/absl/absl/log/scoped_mock_log_test.cc
create mode 100644 third_party/absl/absl/log/stripping_test.cc
create mode 100644 third_party/absl/absl/log/structured.h
create mode 100644 third_party/absl/absl/log/structured_test.cc
create mode 100644 third_party/absl/absl/log/vlog_is_on.h
create mode 100644 third_party/absl/absl/log/vlog_is_on_test.cc
create mode 100644 third_party/absl/absl/memory/BUILD.bazel
create mode 100644 third_party/absl/absl/memory/CMakeLists.txt
create mode 100644 third_party/absl/absl/memory/memory.h
create mode 100644 third_party/absl/absl/memory/memory_test.cc
create mode 100644 third_party/absl/absl/meta/BUILD.bazel
create mode 100644 third_party/absl/absl/meta/CMakeLists.txt
create mode 100644 third_party/absl/absl/meta/type_traits.h
create mode 100644 third_party/absl/absl/meta/type_traits_test.cc
create mode 100644 third_party/absl/absl/numeric/BUILD.bazel
create mode 100644 third_party/absl/absl/numeric/CMakeLists.txt
create mode 100644 third_party/absl/absl/numeric/bits.h
create mode 100644 third_party/absl/absl/numeric/bits_benchmark.cc
create mode 100644 third_party/absl/absl/numeric/bits_test.cc
create mode 100644 third_party/absl/absl/numeric/int128.cc
create mode 100644 third_party/absl/absl/numeric/int128.h
create mode 100644 third_party/absl/absl/numeric/int128_benchmark.cc
create mode 100644 third_party/absl/absl/numeric/int128_have_intrinsic.inc
create mode 100644 third_party/absl/absl/numeric/int128_no_intrinsic.inc
create mode 100644 third_party/absl/absl/numeric/int128_stream_test.cc
create mode 100644 third_party/absl/absl/numeric/int128_test.cc
create mode 100644 third_party/absl/absl/numeric/internal/bits.h
create mode 100644 third_party/absl/absl/numeric/internal/representation.h
create mode 100644 third_party/absl/absl/profiling/BUILD.bazel
create mode 100644 third_party/absl/absl/profiling/CMakeLists.txt
create mode 100644 third_party/absl/absl/profiling/internal/exponential_biased.cc
create mode 100644 third_party/absl/absl/profiling/internal/exponential_biased.h
create mode 100644 third_party/absl/absl/profiling/internal/exponential_biased_test.cc
create mode 100644 third_party/absl/absl/profiling/internal/periodic_sampler.cc
create mode 100644 third_party/absl/absl/profiling/internal/periodic_sampler.h
create mode 100644 third_party/absl/absl/profiling/internal/periodic_sampler_benchmark.cc
create mode 100644 third_party/absl/absl/profiling/internal/periodic_sampler_test.cc
create mode 100644 third_party/absl/absl/profiling/internal/sample_recorder.h
create mode 100644 third_party/absl/absl/profiling/internal/sample_recorder_test.cc
create mode 100644 third_party/absl/absl/random/BUILD.bazel
create mode 100644 third_party/absl/absl/random/CMakeLists.txt
create mode 100644 third_party/absl/absl/random/benchmarks.cc
create mode 100644 third_party/absl/absl/random/bernoulli_distribution.h
create mode 100644 third_party/absl/absl/random/bernoulli_distribution_test.cc
create mode 100644 third_party/absl/absl/random/beta_distribution.h
create mode 100644 third_party/absl/absl/random/beta_distribution_test.cc
create mode 100644 third_party/absl/absl/random/bit_gen_ref.h
create mode 100644 third_party/absl/absl/random/bit_gen_ref_test.cc
create mode 100644 third_party/absl/absl/random/discrete_distribution.cc
create mode 100644 third_party/absl/absl/random/discrete_distribution.h
create mode 100644 third_party/absl/absl/random/discrete_distribution_test.cc
create mode 100644 third_party/absl/absl/random/distributions.h
create mode 100644 third_party/absl/absl/random/distributions_test.cc
create mode 100644 third_party/absl/absl/random/examples_test.cc
create mode 100644 third_party/absl/absl/random/exponential_distribution.h
create mode 100644 third_party/absl/absl/random/exponential_distribution_test.cc
create mode 100644 third_party/absl/absl/random/gaussian_distribution.cc
create mode 100644 third_party/absl/absl/random/gaussian_distribution.h
create mode 100644 third_party/absl/absl/random/gaussian_distribution_test.cc
create mode 100644 third_party/absl/absl/random/generators_test.cc
create mode 100644 third_party/absl/absl/random/internal/BUILD.bazel
create mode 100644 third_party/absl/absl/random/internal/chi_square.cc
create mode 100644 third_party/absl/absl/random/internal/chi_square.h
create mode 100644 third_party/absl/absl/random/internal/chi_square_test.cc
create mode 100644 third_party/absl/absl/random/internal/distribution_caller.h
create mode 100644 third_party/absl/absl/random/internal/distribution_test_util.cc
create mode 100644 third_party/absl/absl/random/internal/distribution_test_util.h
create mode 100644 third_party/absl/absl/random/internal/distribution_test_util_test.cc
create mode 100644 third_party/absl/absl/random/internal/explicit_seed_seq.h
create mode 100644 third_party/absl/absl/random/internal/explicit_seed_seq_test.cc
create mode 100644 third_party/absl/absl/random/internal/fast_uniform_bits.h
create mode 100644 third_party/absl/absl/random/internal/fast_uniform_bits_test.cc
create mode 100644 third_party/absl/absl/random/internal/fastmath.h
create mode 100644 third_party/absl/absl/random/internal/fastmath_test.cc
create mode 100644 third_party/absl/absl/random/internal/gaussian_distribution_gentables.cc
create mode 100644 third_party/absl/absl/random/internal/generate_real.h
create mode 100644 third_party/absl/absl/random/internal/generate_real_test.cc
create mode 100644 third_party/absl/absl/random/internal/iostream_state_saver.h
create mode 100644 third_party/absl/absl/random/internal/iostream_state_saver_test.cc
create mode 100644 third_party/absl/absl/random/internal/mock_helpers.h
create mode 100644 third_party/absl/absl/random/internal/mock_overload_set.h
create mode 100644 third_party/absl/absl/random/internal/nanobenchmark.cc
create mode 100644 third_party/absl/absl/random/internal/nanobenchmark.h
create mode 100644 third_party/absl/absl/random/internal/nanobenchmark_test.cc
create mode 100644 third_party/absl/absl/random/internal/nonsecure_base.h
create mode 100644 third_party/absl/absl/random/internal/nonsecure_base_test.cc
create mode 100644 third_party/absl/absl/random/internal/pcg_engine.h
create mode 100644 third_party/absl/absl/random/internal/pcg_engine_test.cc
create mode 100644 third_party/absl/absl/random/internal/platform.h
create mode 100644 third_party/absl/absl/random/internal/pool_urbg.cc
create mode 100644 third_party/absl/absl/random/internal/pool_urbg.h
create mode 100644 third_party/absl/absl/random/internal/pool_urbg_test.cc
create mode 100644 third_party/absl/absl/random/internal/randen.cc
create mode 100644 third_party/absl/absl/random/internal/randen.h
create mode 100644 third_party/absl/absl/random/internal/randen_benchmarks.cc
create mode 100644 third_party/absl/absl/random/internal/randen_detect.cc
create mode 100644 third_party/absl/absl/random/internal/randen_detect.h
create mode 100644 third_party/absl/absl/random/internal/randen_engine.h
create mode 100644 third_party/absl/absl/random/internal/randen_engine_test.cc
create mode 100644 third_party/absl/absl/random/internal/randen_hwaes.cc
create mode 100644 third_party/absl/absl/random/internal/randen_hwaes.h
create mode 100644 third_party/absl/absl/random/internal/randen_hwaes_test.cc
create mode 100644 third_party/absl/absl/random/internal/randen_round_keys.cc
create mode 100644 third_party/absl/absl/random/internal/randen_slow.cc
create mode 100644 third_party/absl/absl/random/internal/randen_slow.h
create mode 100644 third_party/absl/absl/random/internal/randen_slow_test.cc
create mode 100644 third_party/absl/absl/random/internal/randen_test.cc
create mode 100644 third_party/absl/absl/random/internal/randen_traits.h
create mode 100644 third_party/absl/absl/random/internal/salted_seed_seq.h
create mode 100644 third_party/absl/absl/random/internal/salted_seed_seq_test.cc
create mode 100644 third_party/absl/absl/random/internal/seed_material.cc
create mode 100644 third_party/absl/absl/random/internal/seed_material.h
create mode 100644 third_party/absl/absl/random/internal/seed_material_test.cc
create mode 100644 third_party/absl/absl/random/internal/sequence_urbg.h
create mode 100644 third_party/absl/absl/random/internal/traits.h
create mode 100644 third_party/absl/absl/random/internal/traits_test.cc
create mode 100644 third_party/absl/absl/random/internal/uniform_helper.h
create mode 100644 third_party/absl/absl/random/internal/uniform_helper_test.cc
create mode 100644 third_party/absl/absl/random/internal/wide_multiply.h
create mode 100644 third_party/absl/absl/random/internal/wide_multiply_test.cc
create mode 100644 third_party/absl/absl/random/log_uniform_int_distribution.h
create mode 100644 third_party/absl/absl/random/log_uniform_int_distribution_test.cc
create mode 100644 third_party/absl/absl/random/mock_distributions.h
create mode 100644 third_party/absl/absl/random/mock_distributions_test.cc
create mode 100644 third_party/absl/absl/random/mocking_bit_gen.h
create mode 100644 third_party/absl/absl/random/mocking_bit_gen_test.cc
create mode 100644 third_party/absl/absl/random/poisson_distribution.h
create mode 100644 third_party/absl/absl/random/poisson_distribution_test.cc
create mode 100644 third_party/absl/absl/random/random.h
create mode 100644 third_party/absl/absl/random/seed_gen_exception.cc
create mode 100644 third_party/absl/absl/random/seed_gen_exception.h
create mode 100644 third_party/absl/absl/random/seed_sequences.cc
create mode 100644
third_party/absl/absl/random/seed_sequences.h create mode 100644 third_party/absl/absl/random/seed_sequences_test.cc create mode 100644 third_party/absl/absl/random/uniform_int_distribution.h create mode 100644 third_party/absl/absl/random/uniform_int_distribution_test.cc create mode 100644 third_party/absl/absl/random/uniform_real_distribution.h create mode 100644 third_party/absl/absl/random/uniform_real_distribution_test.cc create mode 100644 third_party/absl/absl/random/zipf_distribution.h create mode 100644 third_party/absl/absl/random/zipf_distribution_test.cc create mode 100644 third_party/absl/absl/status/BUILD.bazel create mode 100644 third_party/absl/absl/status/CMakeLists.txt create mode 100644 third_party/absl/absl/status/internal/status_internal.cc create mode 100644 third_party/absl/absl/status/internal/status_internal.h create mode 100644 third_party/absl/absl/status/internal/statusor_internal.h create mode 100644 third_party/absl/absl/status/status.cc create mode 100644 third_party/absl/absl/status/status.h create mode 100644 third_party/absl/absl/status/status_payload_printer.cc create mode 100644 third_party/absl/absl/status/status_payload_printer.h create mode 100644 third_party/absl/absl/status/status_test.cc create mode 100644 third_party/absl/absl/status/statusor.cc create mode 100644 third_party/absl/absl/status/statusor.h create mode 100644 third_party/absl/absl/status/statusor_test.cc create mode 100644 third_party/absl/absl/strings/BUILD.bazel create mode 100644 third_party/absl/absl/strings/CMakeLists.txt create mode 100644 third_party/absl/absl/strings/ascii.cc create mode 100644 third_party/absl/absl/strings/ascii.h create mode 100644 third_party/absl/absl/strings/ascii_benchmark.cc create mode 100644 third_party/absl/absl/strings/ascii_test.cc create mode 100644 third_party/absl/absl/strings/atod_manual_test.cc create mode 100644 third_party/absl/absl/strings/char_formatting_test.cc create mode 100644 third_party/absl/absl/strings/charconv.cc create mode 100644 third_party/absl/absl/strings/charconv.h create mode 100644 third_party/absl/absl/strings/charconv_benchmark.cc create mode 100644 third_party/absl/absl/strings/charconv_test.cc create mode 100644 third_party/absl/absl/strings/charset.h create mode 100644 third_party/absl/absl/strings/charset_benchmark.cc create mode 100644 third_party/absl/absl/strings/charset_test.cc create mode 100644 third_party/absl/absl/strings/cord.cc create mode 100644 third_party/absl/absl/strings/cord.h create mode 100644 third_party/absl/absl/strings/cord_analysis.cc create mode 100644 third_party/absl/absl/strings/cord_analysis.h create mode 100644 third_party/absl/absl/strings/cord_buffer.cc create mode 100644 third_party/absl/absl/strings/cord_buffer.h create mode 100644 third_party/absl/absl/strings/cord_buffer_test.cc create mode 100644 third_party/absl/absl/strings/cord_test.cc create mode 100644 third_party/absl/absl/strings/cord_test_helpers.h create mode 100644 third_party/absl/absl/strings/cordz_test.cc create mode 100644 third_party/absl/absl/strings/cordz_test_helpers.h create mode 100644 third_party/absl/absl/strings/escaping.cc create mode 100644 third_party/absl/absl/strings/escaping.h create mode 100644 third_party/absl/absl/strings/escaping_benchmark.cc create mode 100644 third_party/absl/absl/strings/escaping_test.cc create mode 100644 third_party/absl/absl/strings/has_absl_stringify.h create mode 100644 third_party/absl/absl/strings/has_absl_stringify_test.cc create mode 100644 
third_party/absl/absl/strings/has_ostream_operator.h create mode 100644 third_party/absl/absl/strings/has_ostream_operator_test.cc create mode 100644 third_party/absl/absl/strings/internal/charconv_bigint.cc create mode 100644 third_party/absl/absl/strings/internal/charconv_bigint.h create mode 100644 third_party/absl/absl/strings/internal/charconv_bigint_test.cc create mode 100644 third_party/absl/absl/strings/internal/charconv_parse.cc create mode 100644 third_party/absl/absl/strings/internal/charconv_parse.h create mode 100644 third_party/absl/absl/strings/internal/charconv_parse_test.cc create mode 100644 third_party/absl/absl/strings/internal/cord_data_edge.h create mode 100644 third_party/absl/absl/strings/internal/cord_data_edge_test.cc create mode 100644 third_party/absl/absl/strings/internal/cord_internal.cc create mode 100644 third_party/absl/absl/strings/internal/cord_internal.h create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree.h create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree_navigator.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree_navigator.h create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree_navigator_test.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree_reader.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree_reader.h create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree_reader_test.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_btree_test.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_consume.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_consume.h create mode 100644 third_party/absl/absl/strings/internal/cord_rep_crc.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_crc.h create mode 100644 third_party/absl/absl/strings/internal/cord_rep_crc_test.cc create mode 100644 third_party/absl/absl/strings/internal/cord_rep_flat.h create mode 100644 third_party/absl/absl/strings/internal/cord_rep_test_util.h create mode 100644 third_party/absl/absl/strings/internal/cordz_functions.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_functions.h create mode 100644 third_party/absl/absl/strings/internal/cordz_functions_test.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_handle.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_handle.h create mode 100644 third_party/absl/absl/strings/internal/cordz_handle_test.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_info.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_info.h create mode 100644 third_party/absl/absl/strings/internal/cordz_info_statistics_test.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_info_test.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_sample_token.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_sample_token.h create mode 100644 third_party/absl/absl/strings/internal/cordz_sample_token_test.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_statistics.h create mode 100644 third_party/absl/absl/strings/internal/cordz_update_scope.h create mode 100644 third_party/absl/absl/strings/internal/cordz_update_scope_test.cc create mode 100644 third_party/absl/absl/strings/internal/cordz_update_tracker.h create mode 100644 
third_party/absl/absl/strings/internal/cordz_update_tracker_test.cc create mode 100644 third_party/absl/absl/strings/internal/damerau_levenshtein_distance.cc create mode 100644 third_party/absl/absl/strings/internal/damerau_levenshtein_distance.h create mode 100644 third_party/absl/absl/strings/internal/damerau_levenshtein_distance_test.cc create mode 100644 third_party/absl/absl/strings/internal/escaping.cc create mode 100644 third_party/absl/absl/strings/internal/escaping.h create mode 100644 third_party/absl/absl/strings/internal/escaping_test_common.h create mode 100644 third_party/absl/absl/strings/internal/has_absl_stringify.h create mode 100644 third_party/absl/absl/strings/internal/memutil.cc create mode 100644 third_party/absl/absl/strings/internal/memutil.h create mode 100644 third_party/absl/absl/strings/internal/memutil_benchmark.cc create mode 100644 third_party/absl/absl/strings/internal/memutil_test.cc create mode 100644 third_party/absl/absl/strings/internal/numbers_test_common.h create mode 100644 third_party/absl/absl/strings/internal/ostringstream.cc create mode 100644 third_party/absl/absl/strings/internal/ostringstream.h create mode 100644 third_party/absl/absl/strings/internal/ostringstream_benchmark.cc create mode 100644 third_party/absl/absl/strings/internal/ostringstream_test.cc create mode 100644 third_party/absl/absl/strings/internal/pow10_helper.cc create mode 100644 third_party/absl/absl/strings/internal/pow10_helper.h create mode 100644 third_party/absl/absl/strings/internal/pow10_helper_test.cc create mode 100644 third_party/absl/absl/strings/internal/resize_uninitialized.h create mode 100644 third_party/absl/absl/strings/internal/resize_uninitialized_test.cc create mode 100644 third_party/absl/absl/strings/internal/stl_type_traits.h create mode 100644 third_party/absl/absl/strings/internal/str_format/arg.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/arg.h create mode 100644 third_party/absl/absl/strings/internal/str_format/arg_test.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/bind.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/bind.h create mode 100644 third_party/absl/absl/strings/internal/str_format/bind_test.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/checker.h create mode 100644 third_party/absl/absl/strings/internal/str_format/checker_test.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/constexpr_parser.h create mode 100644 third_party/absl/absl/strings/internal/str_format/convert_test.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/extension.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/extension.h create mode 100644 third_party/absl/absl/strings/internal/str_format/extension_test.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/float_conversion.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/float_conversion.h create mode 100644 third_party/absl/absl/strings/internal/str_format/output.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/output.h create mode 100644 third_party/absl/absl/strings/internal/str_format/output_test.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/parser.cc create mode 100644 third_party/absl/absl/strings/internal/str_format/parser.h create mode 100644 third_party/absl/absl/strings/internal/str_format/parser_test.cc create mode 100644 
third_party/absl/absl/strings/internal/str_join_internal.h create mode 100644 third_party/absl/absl/strings/internal/str_split_internal.h create mode 100644 third_party/absl/absl/strings/internal/string_constant.h create mode 100644 third_party/absl/absl/strings/internal/string_constant_test.cc create mode 100644 third_party/absl/absl/strings/internal/stringify_sink.cc create mode 100644 third_party/absl/absl/strings/internal/stringify_sink.h create mode 100644 third_party/absl/absl/strings/internal/utf8.cc create mode 100644 third_party/absl/absl/strings/internal/utf8.h create mode 100644 third_party/absl/absl/strings/internal/utf8_test.cc create mode 100644 third_party/absl/absl/strings/match.cc create mode 100644 third_party/absl/absl/strings/match.h create mode 100644 third_party/absl/absl/strings/match_test.cc create mode 100644 third_party/absl/absl/strings/numbers.cc create mode 100644 third_party/absl/absl/strings/numbers.h create mode 100644 third_party/absl/absl/strings/numbers_benchmark.cc create mode 100644 third_party/absl/absl/strings/numbers_test.cc create mode 100644 third_party/absl/absl/strings/str_cat.cc create mode 100644 third_party/absl/absl/strings/str_cat.h create mode 100644 third_party/absl/absl/strings/str_cat_benchmark.cc create mode 100644 third_party/absl/absl/strings/str_cat_test.cc create mode 100644 third_party/absl/absl/strings/str_format.h create mode 100644 third_party/absl/absl/strings/str_format_test.cc create mode 100644 third_party/absl/absl/strings/str_join.h create mode 100644 third_party/absl/absl/strings/str_join_benchmark.cc create mode 100644 third_party/absl/absl/strings/str_join_test.cc create mode 100644 third_party/absl/absl/strings/str_replace.cc create mode 100644 third_party/absl/absl/strings/str_replace.h create mode 100644 third_party/absl/absl/strings/str_replace_benchmark.cc create mode 100644 third_party/absl/absl/strings/str_replace_test.cc create mode 100644 third_party/absl/absl/strings/str_split.cc create mode 100644 third_party/absl/absl/strings/str_split.h create mode 100644 third_party/absl/absl/strings/str_split_benchmark.cc create mode 100644 third_party/absl/absl/strings/str_split_test.cc create mode 100644 third_party/absl/absl/strings/string_view.cc create mode 100644 third_party/absl/absl/strings/string_view.h create mode 100644 third_party/absl/absl/strings/string_view_benchmark.cc create mode 100644 third_party/absl/absl/strings/string_view_test.cc create mode 100644 third_party/absl/absl/strings/strip.h create mode 100644 third_party/absl/absl/strings/strip_test.cc create mode 100644 third_party/absl/absl/strings/substitute.cc create mode 100644 third_party/absl/absl/strings/substitute.h create mode 100644 third_party/absl/absl/strings/substitute_test.cc create mode 100644 third_party/absl/absl/synchronization/BUILD.bazel create mode 100644 third_party/absl/absl/synchronization/CMakeLists.txt create mode 100644 third_party/absl/absl/synchronization/barrier.cc create mode 100644 third_party/absl/absl/synchronization/barrier.h create mode 100644 third_party/absl/absl/synchronization/barrier_test.cc create mode 100644 third_party/absl/absl/synchronization/blocking_counter.cc create mode 100644 third_party/absl/absl/synchronization/blocking_counter.h create mode 100644 third_party/absl/absl/synchronization/blocking_counter_benchmark.cc create mode 100644 third_party/absl/absl/synchronization/blocking_counter_test.cc create mode 100644 third_party/absl/absl/synchronization/internal/create_thread_identity.cc create mode 
100644 third_party/absl/absl/synchronization/internal/create_thread_identity.h create mode 100644 third_party/absl/absl/synchronization/internal/futex.h create mode 100644 third_party/absl/absl/synchronization/internal/futex_waiter.cc create mode 100644 third_party/absl/absl/synchronization/internal/futex_waiter.h create mode 100644 third_party/absl/absl/synchronization/internal/graphcycles.cc create mode 100644 third_party/absl/absl/synchronization/internal/graphcycles.h create mode 100644 third_party/absl/absl/synchronization/internal/graphcycles_benchmark.cc create mode 100644 third_party/absl/absl/synchronization/internal/graphcycles_test.cc create mode 100644 third_party/absl/absl/synchronization/internal/kernel_timeout.cc create mode 100644 third_party/absl/absl/synchronization/internal/kernel_timeout.h create mode 100644 third_party/absl/absl/synchronization/internal/kernel_timeout_test.cc create mode 100644 third_party/absl/absl/synchronization/internal/per_thread_sem.cc create mode 100644 third_party/absl/absl/synchronization/internal/per_thread_sem.h create mode 100644 third_party/absl/absl/synchronization/internal/per_thread_sem_test.cc create mode 100644 third_party/absl/absl/synchronization/internal/pthread_waiter.cc create mode 100644 third_party/absl/absl/synchronization/internal/pthread_waiter.h create mode 100644 third_party/absl/absl/synchronization/internal/sem_waiter.cc create mode 100644 third_party/absl/absl/synchronization/internal/sem_waiter.h create mode 100644 third_party/absl/absl/synchronization/internal/stdcpp_waiter.cc create mode 100644 third_party/absl/absl/synchronization/internal/stdcpp_waiter.h create mode 100644 third_party/absl/absl/synchronization/internal/thread_pool.h create mode 100644 third_party/absl/absl/synchronization/internal/waiter.h create mode 100644 third_party/absl/absl/synchronization/internal/waiter_base.cc create mode 100644 third_party/absl/absl/synchronization/internal/waiter_base.h create mode 100644 third_party/absl/absl/synchronization/internal/waiter_test.cc create mode 100644 third_party/absl/absl/synchronization/internal/win32_waiter.cc create mode 100644 third_party/absl/absl/synchronization/internal/win32_waiter.h create mode 100644 third_party/absl/absl/synchronization/lifetime_test.cc create mode 100644 third_party/absl/absl/synchronization/mutex.cc create mode 100644 third_party/absl/absl/synchronization/mutex.h create mode 100644 third_party/absl/absl/synchronization/mutex_benchmark.cc create mode 100644 third_party/absl/absl/synchronization/mutex_method_pointer_test.cc create mode 100644 third_party/absl/absl/synchronization/mutex_test.cc create mode 100644 third_party/absl/absl/synchronization/notification.cc create mode 100644 third_party/absl/absl/synchronization/notification.h create mode 100644 third_party/absl/absl/synchronization/notification_test.cc create mode 100644 third_party/absl/absl/time/BUILD.bazel create mode 100644 third_party/absl/absl/time/CMakeLists.txt create mode 100644 third_party/absl/absl/time/civil_time.cc create mode 100644 third_party/absl/absl/time/civil_time.h create mode 100644 third_party/absl/absl/time/civil_time_benchmark.cc create mode 100644 third_party/absl/absl/time/civil_time_test.cc create mode 100644 third_party/absl/absl/time/clock.cc create mode 100644 third_party/absl/absl/time/clock.h create mode 100644 third_party/absl/absl/time/clock_benchmark.cc create mode 100644 third_party/absl/absl/time/clock_test.cc create mode 100644 third_party/absl/absl/time/duration.cc create mode 
100644 third_party/absl/absl/time/duration_benchmark.cc create mode 100644 third_party/absl/absl/time/duration_test.cc create mode 100644 third_party/absl/absl/time/flag_test.cc create mode 100644 third_party/absl/absl/time/format.cc create mode 100644 third_party/absl/absl/time/format_benchmark.cc create mode 100644 third_party/absl/absl/time/format_test.cc create mode 100644 third_party/absl/absl/time/internal/cctz/BUILD.bazel create mode 100644 third_party/absl/absl/time/internal/cctz/include/cctz/civil_time.h create mode 100644 third_party/absl/absl/time/internal/cctz/include/cctz/civil_time_detail.h create mode 100644 third_party/absl/absl/time/internal/cctz/include/cctz/time_zone.h create mode 100644 third_party/absl/absl/time/internal/cctz/include/cctz/zone_info_source.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/cctz_benchmark.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/civil_time_detail.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/civil_time_test.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_fixed.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_fixed.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_format.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_format_test.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_if.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_if.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_impl.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_impl.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_info.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_info.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_libc.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_libc.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_lookup.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_lookup_test.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_posix.cc create mode 100644 third_party/absl/absl/time/internal/cctz/src/time_zone_posix.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/tzfile.h create mode 100644 third_party/absl/absl/time/internal/cctz/src/zone_info_source.cc create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/README.zoneinfo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/version create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Abidjan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Accra create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Addis_Ababa create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Algiers create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmara create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Asmera create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bamako create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bangui create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Banjul create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bissau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Blantyre create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Brazzaville create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Bujumbura create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ceuta create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Conakry create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dakar create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Dar_es_Salaam create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Djibouti create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Douala create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Freetown create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Gaborone create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Harare create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Johannesburg create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Juba create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kampala create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Khartoum create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kigali create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Kinshasa create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lagos create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Libreville create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lome create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Luanda create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lubumbashi create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Lusaka create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Malabo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maputo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Maseru create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mbabane create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Mogadishu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Monrovia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nairobi create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ndjamena create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Niamey create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Nouakchott create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Ouagadougou create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Porto-Novo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Sao_Tome create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Timbuktu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tripoli create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Tunis create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Africa/Windhoek create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Adak create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Anchorage create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Anguilla create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Antigua create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Araguaina create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Buenos_Aires create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Catamarca create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/ComodRivadavia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Cordoba create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Jujuy create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/La_Rioja create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Mendoza create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Rio_Gallegos create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Salta create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Juan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/San_Luis create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Tucuman create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Argentina/Ushuaia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Aruba create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Asuncion create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Atikokan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Atka create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Bahia_Banderas create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Barbados create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Belem create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Belize create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Blanc-Sablon create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Boa_Vista create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Bogota create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Boise create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Buenos_Aires create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Cambridge_Bay create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Campo_Grande create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Cancun create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Caracas create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Catamarca create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Cayenne create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Cayman create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Chicago create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Chihuahua create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Ciudad_Juarez create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Coral_Harbour create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Cordoba create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Costa_Rica create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Creston create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Cuiaba create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Curacao create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Danmarkshavn create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Dawson_Creek create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Denver create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Detroit create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Dominica create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Edmonton create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Eirunepe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/El_Salvador create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Ensenada create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Nelson create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Fort_Wayne create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Fortaleza create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Glace_Bay create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Goose_Bay create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Grand_Turk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Grenada create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Guadeloupe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Guatemala create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Guayaquil create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Guyana create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Halifax create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Havana create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Hermosillo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Indianapolis create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Knox create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Marengo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Petersburg create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Tell_City create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vevay create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Vincennes create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indiana/Winamac create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Indianapolis create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Inuvik create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Iqaluit create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Jamaica create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Jujuy create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Juneau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Louisville create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Kentucky/Monticello create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Knox_IN create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Kralendijk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/La_Paz create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Lima create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Los_Angeles create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Louisville create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Lower_Princes create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Maceio create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Managua create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Manaus create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Marigot create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Martinique create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Matamoros create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Mazatlan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Mendoza create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Menominee create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Merida create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Metlakatla create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Mexico_City create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Miquelon create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Moncton create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Monterrey create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Montevideo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Montreal create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Montserrat create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Nassau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/New_York create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Nipigon create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Nome create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Noronha create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Beulah create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/Center create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/North_Dakota/New_Salem create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Ojinaga create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Panama create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Pangnirtung create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Paramaribo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Phoenix create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Port-au-Prince create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Port_of_Spain create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Acre create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Porto_Velho create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Puerto_Rico create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Rainy_River create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Rankin_Inlet create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Recife create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Regina create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Resolute 
create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Rio_Branco create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Rosario create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Santa_Isabel create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Santarem create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Santo_Domingo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Sao_Paulo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Scoresbysund create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Shiprock create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Sitka create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/St_Barthelemy create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/St_Johns create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/St_Kitts create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/St_Lucia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/St_Thomas create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/St_Vincent create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Swift_Current create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Tegucigalpa create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Thule create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Thunder_Bay create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Tijuana create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Toronto create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Tortola create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Vancouver create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Virgin create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Whitehorse create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Winnipeg create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Yakutat create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Casey create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Davis create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/DumontDUrville create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Macquarie create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Mawson create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/McMurdo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Palmer create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Rothera create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/South_Pole create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Syowa create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Troll create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Antarctica/Vostok create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Arctic/Longyearbyen create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aden create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Almaty create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Amman create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Anadyr create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Aqtobe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashgabat create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ashkhabad create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Atyrau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baghdad create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bahrain create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Baku create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bangkok create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Barnaul create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Beirut create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Bishkek create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Brunei create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Calcutta create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chita create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Choibalsan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chongqing create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Chungking create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Colombo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dacca create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Damascus create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dhaka create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dili create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dubai create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Dushanbe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Famagusta create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Harbin create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron create 
mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ho_Chi_Minh create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hong_Kong create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hovd create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Irkutsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Istanbul create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jakarta create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jayapura create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Jerusalem create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kabul create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kamchatka create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Karachi create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kashgar create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kathmandu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Katmandu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Khandyga create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kolkata create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Krasnoyarsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuala_Lumpur create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuching create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Kuwait create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macao create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Macau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Magadan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Makassar create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Manila create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Muscat create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Nicosia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novokuznetsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Novosibirsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Omsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Oral create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Phnom_Penh create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pontianak create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Pyongyang create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qatar create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qostanay create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Qyzylorda create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Rangoon create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Riyadh create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Saigon create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Sakhalin create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Samarkand create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Seoul create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Shanghai create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Singapore create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Srednekolymsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Taipei create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tashkent create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tbilisi create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tehran create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tel_Aviv create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimbu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Thimphu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tokyo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Tomsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ujung_Pandang create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulaanbaatar create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ulan_Bator create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Urumqi create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Ust-Nera create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vientiane create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Vladivostok create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yakutsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yangon create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yekaterinburg create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Asia/Yerevan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Azores create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Bermuda create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Canary create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Cape_Verde create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faeroe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Faroe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Jan_Mayen create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Madeira create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Reykjavik create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/South_Georgia create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/St_Helena create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Atlantic/Stanley create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/ACT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Adelaide create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Brisbane create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Broken_Hill create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Canberra create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Currie create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Darwin create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Eucla create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Hobart create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/LHI create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lindeman create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Lord_Howe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Melbourne create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/NSW create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/North create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Perth create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Queensland create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/South create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Sydney create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Tasmania create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Victoria create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/West create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Australia/Yancowinna create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Brazil/Acre create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Brazil/DeNoronha create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Brazil/East create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Brazil/West create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/CET create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/CST6CDT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Atlantic create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Central create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Eastern create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Mountain create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Newfoundland create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Pacific create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Saskatchewan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Canada/Yukon create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Chile/EasterIsland create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Cuba create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/EET create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/EST create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/EST5EDT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Egypt create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Eire create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+0 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+1 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+10 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+11 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+12 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+2 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+3 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+4 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+5 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+6 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+7 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+8 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT+9 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-0 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-1 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-10 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-11 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-12 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-13 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-14 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-2 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-3 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-4 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-5 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-6 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-7 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-8 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT-9 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/GMT0 create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/Greenwich create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/UCT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/UTC create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/Universal create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Etc/Zulu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Amsterdam create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Andorra create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Astrakhan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Athens create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belfast create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Belgrade create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Berlin create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bratislava create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Brussels create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Bucharest create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Budapest create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Busingen create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Chisinau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Copenhagen create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Dublin create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Gibraltar create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Guernsey create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Helsinki create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Isle_of_Man create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Istanbul create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Jersey create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kaliningrad create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kyiv create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Lisbon create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ljubljana create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/London create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Luxembourg create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Madrid create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Malta create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Mariehamn create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Minsk create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Monaco create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Moscow create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Nicosia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Oslo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Paris create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Podgorica create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Prague create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Riga create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Rome create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Samara create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/San_Marino create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sarajevo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Saratov create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Skopje create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Sofia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Stockholm create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tallinn create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tirane create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Tiraspol create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Ulyanovsk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vaduz create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vatican create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vienna create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Vilnius create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Warsaw create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zagreb create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zurich create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Factory create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/GB create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/GB-Eire create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/GMT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/GMT+0 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/GMT-0 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/GMT0 create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Greenwich create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/HST create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Hongkong create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Iceland create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Antananarivo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Chagos create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Christmas create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Cocos create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Comoro create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Kerguelen create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mahe create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Maldives create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mauritius create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Mayotte create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Indian/Reunion create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Iran create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Israel create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Jamaica create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Japan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Kwajalein create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Libya create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/MET create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/MST create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/MST7MDT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaNorte create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Mexico/BajaSur create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Mexico/General create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/NZ create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/NZ-CHAT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Navajo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/PRC create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/PST8PDT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Apia create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Auckland create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Bougainville create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chatham create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Chuuk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Easter create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Efate create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Enderbury create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fakaofo create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Fiji create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Funafuti create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Galapagos create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Gambier create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guadalcanal create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Guam create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Honolulu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Johnston create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kanton create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kiritimati create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kosrae create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Kwajalein create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Majuro create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Marquesas create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Midway create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Nauru create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Niue create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Norfolk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Noumea create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pago_Pago create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Palau create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pitcairn create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Pohnpei create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Ponape create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Port_Moresby create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Rarotonga create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Saipan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Samoa create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tahiti create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tarawa create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Tongatapu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Truk create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wake create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Wallis create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Pacific/Yap create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Poland create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Portugal create mode 100644 
third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/ROC create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/ROK create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Singapore create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Turkey create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/UCT create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Alaska create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Aleutian create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Arizona create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Central create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/East-Indiana create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Eastern create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Hawaii create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Indiana-Starke create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Michigan create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Mountain create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Pacific create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/US/Samoa create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/UTC create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Universal create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/W-SU create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/WET create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/Zulu create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab create mode 100644 third_party/absl/absl/time/internal/cctz/testdata/zoneinfo/zonenow.tab create mode 100644 third_party/absl/absl/time/internal/get_current_time_chrono.inc create mode 100644 third_party/absl/absl/time/internal/get_current_time_posix.inc create mode 100644 third_party/absl/absl/time/internal/test_util.cc create mode 100644 third_party/absl/absl/time/internal/test_util.h create mode 100644 third_party/absl/absl/time/time.cc create mode 100644 third_party/absl/absl/time/time.h create mode 100644 third_party/absl/absl/time/time_benchmark.cc create mode 100644 third_party/absl/absl/time/time_test.cc create mode 100644 third_party/absl/absl/time/time_zone_test.cc create mode 100644 third_party/absl/absl/types/BUILD.bazel create mode 100644 third_party/absl/absl/types/CMakeLists.txt create mode 100644 third_party/absl/absl/types/any.h create mode 100644 third_party/absl/absl/types/any_exception_safety_test.cc create mode 100644 third_party/absl/absl/types/any_test.cc create mode 100644 third_party/absl/absl/types/bad_any_cast.cc create mode 100644 third_party/absl/absl/types/bad_any_cast.h create mode 100644 third_party/absl/absl/types/bad_optional_access.cc create mode 100644 third_party/absl/absl/types/bad_optional_access.h create mode 100644 third_party/absl/absl/types/bad_variant_access.cc create mode 100644 third_party/absl/absl/types/bad_variant_access.h create mode 100644 third_party/absl/absl/types/compare.h create mode 100644 
third_party/absl/absl/types/compare_test.cc create mode 100644 third_party/absl/absl/types/internal/optional.h create mode 100644 third_party/absl/absl/types/internal/span.h create mode 100644 third_party/absl/absl/types/internal/variant.h create mode 100644 third_party/absl/absl/types/optional.h create mode 100644 third_party/absl/absl/types/optional_exception_safety_test.cc create mode 100644 third_party/absl/absl/types/optional_test.cc create mode 100644 third_party/absl/absl/types/span.h create mode 100644 third_party/absl/absl/types/span_test.cc create mode 100644 third_party/absl/absl/types/variant.h create mode 100644 third_party/absl/absl/types/variant_benchmark.cc create mode 100644 third_party/absl/absl/types/variant_exception_safety_test.cc create mode 100644 third_party/absl/absl/types/variant_test.cc create mode 100644 third_party/absl/absl/utility/BUILD.bazel create mode 100644 third_party/absl/absl/utility/CMakeLists.txt create mode 100644 third_party/absl/absl/utility/internal/if_constexpr.h create mode 100644 third_party/absl/absl/utility/internal/if_constexpr_test.cc create mode 100644 third_party/absl/absl/utility/utility.h create mode 100644 third_party/absl/absl/utility/utility_test.cc create mode 100644 third_party/absl/ci/absl_alternate_options.h create mode 100644 third_party/absl/ci/cmake_common.sh create mode 100755 third_party/absl/ci/cmake_install_test.sh create mode 100755 third_party/absl/ci/linux_arm_clang-latest_libcxx_bazel.sh create mode 100755 third_party/absl/ci/linux_clang-latest_libcxx_asan_bazel.sh create mode 100755 third_party/absl/ci/linux_clang-latest_libcxx_bazel.sh create mode 100755 third_party/absl/ci/linux_clang-latest_libcxx_tsan_bazel.sh create mode 100755 third_party/absl/ci/linux_clang-latest_libstdcxx_bazel.sh create mode 100644 third_party/absl/ci/linux_docker_containers.sh create mode 100755 third_party/absl/ci/linux_gcc-floor_libstdcxx_bazel.sh create mode 100755 third_party/absl/ci/linux_gcc-latest_libstdcxx_bazel.sh create mode 100755 third_party/absl/ci/linux_gcc-latest_libstdcxx_cmake.sh create mode 100755 third_party/absl/ci/linux_gcc_alpine_cmake.sh create mode 100755 third_party/absl/ci/macos_xcode_bazel.sh create mode 100755 third_party/absl/ci/macos_xcode_cmake.sh create mode 100755 third_party/absl/ci/windows_clangcl_bazel.bat create mode 100755 third_party/absl/ci/windows_msvc_bazel.bat create mode 100755 third_party/absl/ci/windows_msvc_cmake.bat create mode 100755 third_party/absl/conanfile.py create mode 100755 third_party/absl/create_lts.py diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index be7a21642..f9e7c07cb 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -82,6 +82,20 @@ properties of the app. The settings.gradle (or settings.gradle.kts) file configures repositories for fetching dependencies, and declares each app module. +### CMake + +Contains CMake modules used by the samples. See [CMake/README.md] for details. + +[CMake/README.md]: CMake/README.md + +### third_party + +Contains third-party code used by the samples. These should be kept to a minimum +to avoid the samples being too difficult to reuse. See [third_party/README.md] +for more information. + +[third_party/README.md]: third_party/README.md + ### Metadata directories #### .github diff --git a/CMake/README.md b/CMake/README.md new file mode 100644 index 000000000..2a7ef3098 --- /dev/null +++ b/CMake/README.md @@ -0,0 +1,6 @@ +# CMake modules + +This directory contains CMake modules used by the samples. 
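For illustration, a sample consuming one of these modules might use a CMakeLists.txt like the sketch below (the `include()` mechanism itself is described in the README text that follows). Only `include(absl)`, the `CMAKE_MODULE_PATH` wiring, and the absl target names come from this patch; the `my_sample` project, its source file, and the minimum CMake version are assumptions made up for the example.

```cmake
# Hypothetical sample CMakeLists.txt. Assumes the Gradle convention plugin
# passed -DCMAKE_MODULE_PATH=<repo>/CMake, so include(absl) resolves to
# CMake/absl.cmake and pulls third_party/absl into this build.
cmake_minimum_required(VERSION 3.22)
project(my_sample LANGUAGES CXX)

include(absl)

add_library(my_sample SHARED my_sample.cpp)

# absl::log and absl::check provide LOG() and CHECK() respectively.
target_link_libraries(my_sample PRIVATE absl::log absl::check)
```

Because `third_party/absl` lies outside a sample's own source tree, `CMake/absl.cmake` must pass an explicit binary directory to `add_subdirectory`, and its `include_guard()` makes repeated `include(absl)` calls harmless.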
They can be imported
+in any sample with `include(<module>)`, where `<module>` is the name of the module
+file without the extension. For example, `include(absl)` will include absl in
+the build and make those libraries available to the sample.
diff --git a/CMake/absl.cmake b/CMake/absl.cmake
new file mode 100644
index 000000000..33972f8d8
--- /dev/null
+++ b/CMake/absl.cmake
@@ -0,0 +1,2 @@
+include_guard()
+add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/absl ${CMAKE_CURRENT_BINARY_DIR}/absl)
diff --git a/build-logic/src/main/java/com/android/ndk/samples/buildlogic/AndroidApplicationConventionPlugin.kt b/build-logic/src/main/java/com/android/ndk/samples/buildlogic/AndroidApplicationConventionPlugin.kt
index 23f521823..f5441e228 100644
--- a/build-logic/src/main/java/com/android/ndk/samples/buildlogic/AndroidApplicationConventionPlugin.kt
+++ b/build-logic/src/main/java/com/android/ndk/samples/buildlogic/AndroidApplicationConventionPlugin.kt
@@ -8,6 +8,7 @@ import org.gradle.kotlin.dsl.configure
 
 class AndroidApplicationConventionPlugin : Plugin<Project> {
     override fun apply(target: Project) {
+        val modulePath = target.rootProject.rootDir.resolve("CMake")
         with(target) {
             with(pluginManager) {
                 apply("com.android.application")
@@ -19,6 +20,12 @@ class AndroidApplicationConventionPlugin : Plugin<Project> {
             defaultConfig {
                 minSdk = Versions.MIN_SDK
                 targetSdk = Versions.TARGET_SDK
+
+                externalNativeBuild {
+                    cmake {
+                        arguments.add("-DCMAKE_MODULE_PATH=$modulePath")
+                    }
+                }
             }
             compileOptions {
                 sourceCompatibility = Versions.JAVA
diff --git a/third_party/.clang-format b/third_party/.clang-format
new file mode 100644
index 000000000..e7d26fae5
--- /dev/null
+++ b/third_party/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: never
diff --git a/third_party/README.md b/third_party/README.md
new file mode 100644
index 000000000..ea525cb96
--- /dev/null
+++ b/third_party/README.md
@@ -0,0 +1,13 @@
+# third_party
+
+This directory contains third-party code used by the samples. Currently this
+directory contains only absl, which provides things like `LOG(ERROR)` and
+`CHECK()`.
+
+Note that absl does **not** have a stable ABI at the time of writing. As such,
+if you are a middleware developer who distributes native libraries in binary
+form (whether `.so` or `.a`), you probably cannot use absl. See [Advice for
+middleware vendors] for more details.
+
+[Advice for middleware vendors]:
+    https://developer.android.com/ndk/guides/middleware-vendors
diff --git a/third_party/absl/.clang-format b/third_party/absl/.clang-format
new file mode 100644
index 000000000..06ea346a1
--- /dev/null
+++ b/third_party/absl/.clang-format
@@ -0,0 +1,4 @@
+---
+Language: Cpp
+BasedOnStyle: Google
+...
diff --git a/third_party/absl/.github/ISSUE_TEMPLATE/00-bug_report.yml b/third_party/absl/.github/ISSUE_TEMPLATE/00-bug_report.yml
new file mode 100644
index 000000000..32cebe15a
--- /dev/null
+++ b/third_party/absl/.github/ISSUE_TEMPLATE/00-bug_report.yml
@@ -0,0 +1,53 @@
+name: Bug Report
+description: Let us know that something does not work as expected.
+title: "[Bug]: Please title this bug report"
+body:
+  - type: textarea
+    id: what-happened
+    attributes:
+      label: Describe the issue
+      description: What happened, and what did you expect to happen?
+    validations:
+      required: true
+  - type: textarea
+    id: steps
+    attributes:
+      label: Steps to reproduce the problem
+      description: It is important that we are able to reproduce the problem that you are experiencing.
Please provide all code and relevant steps to reproduce the problem, including your `BUILD`/`CMakeLists.txt` file and build commands. Links to a GitHub branch or [godbolt.org](https://godbolt.org/) that demonstrate the problem are also helpful. + validations: + required: true + - type: textarea + id: version + attributes: + label: What version of Abseil are you using? + description: Please include the output of `git rev-parse HEAD` or the name of the LTS release that you are using. + validations: + required: true + - type: textarea + id: os + attributes: + label: What operating system and version are you using? + description: If you are using a Linux distribution please include the name and version of the distribution as well. + validations: + required: true + - type: textarea + id: compiler + attributes: + label: What compiler and version are you using? + description: Please include the output of `gcc -v` or `clang -v`, or the equivalent for your compiler. + validations: + required: true + - type: textarea + id: buildsystem + attributes: + label: What build system are you using? + description: Please include the output of `bazel --version` or `cmake --version`, or the equivalent for your build system. + validations: + required: true + - type: textarea + id: additional + attributes: + label: Additional context + description: Add any other context about the problem here. + validations: + required: false diff --git a/third_party/absl/.github/ISSUE_TEMPLATE/config.yml b/third_party/absl/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..c690fd9a5 --- /dev/null +++ b/third_party/absl/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Question + url: https://github.com/abseil/abseil-cpp/discussions + about: Have a question? Ask us anything! :-) diff --git a/third_party/absl/.github/PULL_REQUEST_TEMPLATE.md b/third_party/absl/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..ff55e3529 --- /dev/null +++ b/third_party/absl/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,8 @@ +Thank you for your contribution to Abseil! + +Before submitting this PR, please be sure to read our [contributing +guidelines](https://github.com/abseil/abseil-cpp/blob/master/CONTRIBUTING.md). + +If you are a Googler, please also note that it is required that you send us a +Piper CL instead of using the GitHub pull-request process. The code propagation +process will deliver the change to GitHub. diff --git a/third_party/absl/.gitignore b/third_party/absl/.gitignore new file mode 100644 index 000000000..3a729f416 --- /dev/null +++ b/third_party/absl/.gitignore @@ -0,0 +1,17 @@ +# Bzlmod lockfile +MODULE.bazel.lock +# Ignore all bazel-* symlinks. 
+/bazel-* +# Ignore Bazel verbose explanations +--verbose_explanations +# Ignore CMake usual build directory +build +# Ignore Vim files +*.swp +# Ignore QtCreator Project file +CMakeLists.txt.user +# Ignore VS Code files +.vscode/* +# Ignore generated python artifacts +*.pyc +copts/__pycache__/ diff --git a/third_party/absl/ABSEIL_ISSUE_TEMPLATE.md b/third_party/absl/ABSEIL_ISSUE_TEMPLATE.md new file mode 100644 index 000000000..ed5461f16 --- /dev/null +++ b/third_party/absl/ABSEIL_ISSUE_TEMPLATE.md @@ -0,0 +1,22 @@ +Please submit a new Abseil Issue using the template below: + +## [Short title of proposed API change(s)] + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- + +## Background + +[Provide the background information that is required in order to evaluate the +proposed API changes. No controversial claims should be made here. If there are +design constraints that need to be considered, they should be presented here +**along with justification for those constraints**. Linking to other docs is +good, but please keep the **pertinent information as self contained** as +possible in this section.] + +## Proposed API Change (s) + +[Please clearly describe the API change(s) being proposed. If multiple changes, +please keep them clearly distinguished. When possible, **use example code +snippets to illustrate before-after API usages**. List pros-n-cons. Highlight +the main questions that you want to be answered. Given the Abseil project compatibility requirements, describe why the API change is safe.] diff --git a/third_party/absl/AUTHORS b/third_party/absl/AUTHORS new file mode 100644 index 000000000..976d31def --- /dev/null +++ b/third_party/absl/AUTHORS @@ -0,0 +1,6 @@ +# This is the list of Abseil authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google Inc. diff --git a/third_party/absl/BUILD.bazel b/third_party/absl/BUILD.bazel new file mode 100644 index 000000000..79fb0ecd7 --- /dev/null +++ b/third_party/absl/BUILD.bazel @@ -0,0 +1,25 @@ +# +# Copyright 2020 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +# Expose license for external usage through bazel. 
+exports_files([ + "AUTHORS", + "LICENSE", +]) diff --git a/third_party/absl/CMake/AbseilDll.cmake b/third_party/absl/CMake/AbseilDll.cmake new file mode 100644 index 000000000..47f3beebd --- /dev/null +++ b/third_party/absl/CMake/AbseilDll.cmake @@ -0,0 +1,837 @@ +include(CMakeParseArguments) +include(GNUInstallDirs) + +set(ABSL_INTERNAL_DLL_FILES + "algorithm/algorithm.h" + "algorithm/container.h" + "base/attributes.h" + "base/call_once.h" + "base/casts.h" + "base/config.h" + "base/const_init.h" + "base/dynamic_annotations.h" + "base/internal/atomic_hook.h" + "base/internal/cycleclock.cc" + "base/internal/cycleclock.h" + "base/internal/cycleclock_config.h" + "base/internal/direct_mmap.h" + "base/internal/dynamic_annotations.h" + "base/internal/endian.h" + "base/internal/errno_saver.h" + "base/internal/fast_type_id.h" + "base/internal/hide_ptr.h" + "base/internal/identity.h" + "base/internal/invoke.h" + "base/internal/inline_variable.h" + "base/internal/low_level_alloc.cc" + "base/internal/low_level_alloc.h" + "base/internal/low_level_scheduling.h" + "base/internal/nullability_impl.h" + "base/internal/per_thread_tls.h" + "base/prefetch.h" + "base/internal/pretty_function.h" + "base/internal/raw_logging.cc" + "base/internal/raw_logging.h" + "base/internal/scheduling_mode.h" + "base/internal/scoped_set_env.cc" + "base/internal/scoped_set_env.h" + "base/internal/strerror.h" + "base/internal/strerror.cc" + "base/internal/spinlock.cc" + "base/internal/spinlock.h" + "base/internal/spinlock_wait.cc" + "base/internal/spinlock_wait.h" + "base/internal/sysinfo.cc" + "base/internal/sysinfo.h" + "base/internal/thread_identity.cc" + "base/internal/thread_identity.h" + "base/internal/throw_delegate.cc" + "base/internal/throw_delegate.h" + "base/internal/tsan_mutex_interface.h" + "base/internal/unaligned_access.h" + "base/internal/unscaledcycleclock.cc" + "base/internal/unscaledcycleclock.h" + "base/internal/unscaledcycleclock_config.h" + "base/log_severity.cc" + "base/log_severity.h" + "base/macros.h" + "base/no_destructor.h" + "base/nullability.h" + "base/optimization.h" + "base/options.h" + "base/policy_checks.h" + "base/port.h" + "base/thread_annotations.h" + "cleanup/cleanup.h" + "cleanup/internal/cleanup.h" + "container/btree_map.h" + "container/btree_set.h" + "container/fixed_array.h" + "container/flat_hash_map.h" + "container/flat_hash_set.h" + "container/inlined_vector.h" + "container/internal/btree.h" + "container/internal/btree_container.h" + "container/internal/common.h" + "container/internal/common_policy_traits.h" + "container/internal/compressed_tuple.h" + "container/internal/container_memory.h" + "container/internal/hash_function_defaults.h" + "container/internal/hash_policy_traits.h" + "container/internal/hashtable_debug.h" + "container/internal/hashtable_debug_hooks.h" + "container/internal/hashtablez_sampler.cc" + "container/internal/hashtablez_sampler.h" + "container/internal/hashtablez_sampler_force_weak_definition.cc" + "container/internal/inlined_vector.h" + "container/internal/layout.h" + "container/internal/node_slot_policy.h" + "container/internal/raw_hash_map.h" + "container/internal/raw_hash_set.cc" + "container/internal/raw_hash_set.h" + "container/internal/tracked.h" + "container/node_hash_map.h" + "container/node_hash_set.h" + "crc/crc32c.cc" + "crc/crc32c.h" + "crc/internal/cpu_detect.cc" + "crc/internal/cpu_detect.h" + "crc/internal/crc32c.h" + "crc/internal/crc32c_inline.h" + "crc/internal/crc32_x86_arm_combined_simd.h" + "crc/internal/crc.cc" + "crc/internal/crc.h" + 
"crc/internal/crc_cord_state.cc" + "crc/internal/crc_cord_state.h" + "crc/internal/crc_internal.h" + "crc/internal/crc_x86_arm_combined.cc" + "crc/internal/crc_memcpy_fallback.cc" + "crc/internal/crc_memcpy.h" + "crc/internal/crc_memcpy_x86_arm_combined.cc" + "crc/internal/crc_non_temporal_memcpy.cc" + "crc/internal/crc_x86_arm_combined.cc" + "crc/internal/non_temporal_arm_intrinsics.h" + "crc/internal/non_temporal_memcpy.h" + "debugging/failure_signal_handler.cc" + "debugging/failure_signal_handler.h" + "debugging/leak_check.h" + "debugging/stacktrace.cc" + "debugging/stacktrace.h" + "debugging/symbolize.cc" + "debugging/symbolize.h" + "debugging/internal/address_is_readable.cc" + "debugging/internal/address_is_readable.h" + "debugging/internal/demangle.cc" + "debugging/internal/demangle.h" + "debugging/internal/elf_mem_image.cc" + "debugging/internal/elf_mem_image.h" + "debugging/internal/examine_stack.cc" + "debugging/internal/examine_stack.h" + "debugging/internal/stack_consumption.cc" + "debugging/internal/stack_consumption.h" + "debugging/internal/stacktrace_config.h" + "debugging/internal/symbolize.h" + "debugging/internal/vdso_support.cc" + "debugging/internal/vdso_support.h" + "functional/any_invocable.h" + "functional/internal/front_binder.h" + "functional/bind_front.h" + "functional/function_ref.h" + "functional/internal/any_invocable.h" + "functional/internal/function_ref.h" + "functional/overload.h" + "hash/hash.h" + "hash/internal/city.h" + "hash/internal/city.cc" + "hash/internal/hash.h" + "hash/internal/hash.cc" + "hash/internal/spy_hash_state.h" + "hash/internal/low_level_hash.h" + "hash/internal/low_level_hash.cc" + "log/absl_check.h" + "log/absl_log.h" + "log/absl_vlog_is_on.h" + "log/check.h" + "log/die_if_null.cc" + "log/die_if_null.h" + "log/globals.cc" + "log/globals.h" + "log/internal/append_truncated.h" + "log/internal/check_impl.h" + "log/internal/check_op.cc" + "log/internal/check_op.h" + "log/internal/conditions.cc" + "log/internal/conditions.h" + "log/internal/config.h" + "log/internal/fnmatch.h" + "log/internal/fnmatch.cc" + "log/internal/globals.cc" + "log/internal/globals.h" + "log/internal/log_format.cc" + "log/internal/log_format.h" + "log/internal/log_impl.h" + "log/internal/log_message.cc" + "log/internal/log_message.h" + "log/internal/log_sink_set.cc" + "log/internal/log_sink_set.h" + "log/internal/nullguard.cc" + "log/internal/nullguard.h" + "log/internal/nullstream.h" + "log/internal/proto.h" + "log/internal/proto.cc" + "log/internal/strip.h" + "log/internal/structured.h" + "log/internal/vlog_config.cc" + "log/internal/vlog_config.h" + "log/internal/voidify.h" + "log/initialize.cc" + "log/initialize.h" + "log/log.h" + "log/log_entry.cc" + "log/log_entry.h" + "log/log_sink.cc" + "log/log_sink.h" + "log/log_sink_registry.h" + "log/log_streamer.h" + "log/structured.h" + "log/vlog_is_on.h" + "memory/memory.h" + "meta/type_traits.h" + "numeric/bits.h" + "numeric/int128.cc" + "numeric/int128.h" + "numeric/internal/bits.h" + "numeric/internal/representation.h" + "profiling/internal/exponential_biased.cc" + "profiling/internal/exponential_biased.h" + "profiling/internal/periodic_sampler.cc" + "profiling/internal/periodic_sampler.h" + "profiling/internal/sample_recorder.h" + "random/bernoulli_distribution.h" + "random/beta_distribution.h" + "random/bit_gen_ref.h" + "random/discrete_distribution.cc" + "random/discrete_distribution.h" + "random/distributions.h" + "random/exponential_distribution.h" + "random/gaussian_distribution.cc" + 
"random/gaussian_distribution.h" + "random/internal/distribution_caller.h" + "random/internal/fastmath.h" + "random/internal/fast_uniform_bits.h" + "random/internal/generate_real.h" + "random/internal/iostream_state_saver.h" + "random/internal/nonsecure_base.h" + "random/internal/pcg_engine.h" + "random/internal/platform.h" + "random/internal/pool_urbg.cc" + "random/internal/pool_urbg.h" + "random/internal/randen.cc" + "random/internal/randen.h" + "random/internal/randen_detect.cc" + "random/internal/randen_detect.h" + "random/internal/randen_engine.h" + "random/internal/randen_hwaes.cc" + "random/internal/randen_hwaes.h" + "random/internal/randen_round_keys.cc" + "random/internal/randen_slow.cc" + "random/internal/randen_slow.h" + "random/internal/randen_traits.h" + "random/internal/salted_seed_seq.h" + "random/internal/seed_material.cc" + "random/internal/seed_material.h" + "random/internal/sequence_urbg.h" + "random/internal/traits.h" + "random/internal/uniform_helper.h" + "random/internal/wide_multiply.h" + "random/log_uniform_int_distribution.h" + "random/poisson_distribution.h" + "random/random.h" + "random/seed_gen_exception.cc" + "random/seed_gen_exception.h" + "random/seed_sequences.cc" + "random/seed_sequences.h" + "random/uniform_int_distribution.h" + "random/uniform_real_distribution.h" + "random/zipf_distribution.h" + "status/internal/status_internal.h" + "status/internal/status_internal.cc" + "status/internal/statusor_internal.h" + "status/status.h" + "status/status.cc" + "status/statusor.h" + "status/statusor.cc" + "status/status_payload_printer.h" + "status/status_payload_printer.cc" + "strings/ascii.cc" + "strings/ascii.h" + "strings/charconv.cc" + "strings/charconv.h" + "strings/charset.h" + "strings/cord.cc" + "strings/cord.h" + "strings/cord_analysis.cc" + "strings/cord_analysis.h" + "strings/cord_buffer.cc" + "strings/cord_buffer.h" + "strings/escaping.cc" + "strings/escaping.h" + "strings/internal/charconv_bigint.cc" + "strings/internal/charconv_bigint.h" + "strings/internal/charconv_parse.cc" + "strings/internal/charconv_parse.h" + "strings/internal/cord_data_edge.h" + "strings/internal/cord_internal.cc" + "strings/internal/cord_internal.h" + "strings/internal/cord_rep_btree.cc" + "strings/internal/cord_rep_btree.h" + "strings/internal/cord_rep_btree_navigator.cc" + "strings/internal/cord_rep_btree_navigator.h" + "strings/internal/cord_rep_btree_reader.cc" + "strings/internal/cord_rep_btree_reader.h" + "strings/internal/cord_rep_crc.cc" + "strings/internal/cord_rep_crc.h" + "strings/internal/cord_rep_consume.h" + "strings/internal/cord_rep_consume.cc" + "strings/internal/cord_rep_flat.h" + "strings/internal/cordz_functions.cc" + "strings/internal/cordz_functions.h" + "strings/internal/cordz_handle.cc" + "strings/internal/cordz_handle.h" + "strings/internal/cordz_info.cc" + "strings/internal/cordz_info.h" + "strings/internal/cordz_sample_token.cc" + "strings/internal/cordz_sample_token.h" + "strings/internal/cordz_statistics.h" + "strings/internal/cordz_update_scope.h" + "strings/internal/cordz_update_tracker.h" + "strings/internal/damerau_levenshtein_distance.h" + "strings/internal/damerau_levenshtein_distance.cc" + "strings/internal/stl_type_traits.h" + "strings/internal/string_constant.h" + "strings/internal/stringify_sink.h" + "strings/internal/stringify_sink.cc" + "strings/internal/has_absl_stringify.h" + "strings/has_absl_stringify.h" + "strings/has_ostream_operator.h" + "strings/match.cc" + "strings/match.h" + "strings/numbers.cc" + "strings/numbers.h" + 
"strings/str_format.h" + "strings/str_cat.cc" + "strings/str_cat.h" + "strings/str_join.h" + "strings/str_replace.cc" + "strings/str_replace.h" + "strings/str_split.cc" + "strings/str_split.h" + "strings/string_view.cc" + "strings/string_view.h" + "strings/strip.h" + "strings/substitute.cc" + "strings/substitute.h" + "strings/internal/escaping.h" + "strings/internal/escaping.cc" + "strings/internal/memutil.cc" + "strings/internal/memutil.h" + "strings/internal/ostringstream.cc" + "strings/internal/ostringstream.h" + "strings/internal/pow10_helper.cc" + "strings/internal/pow10_helper.h" + "strings/internal/resize_uninitialized.h" + "strings/internal/str_format/arg.cc" + "strings/internal/str_format/arg.h" + "strings/internal/str_format/bind.cc" + "strings/internal/str_format/bind.h" + "strings/internal/str_format/checker.h" + "strings/internal/str_format/constexpr_parser.h" + "strings/internal/str_format/extension.cc" + "strings/internal/str_format/extension.h" + "strings/internal/str_format/float_conversion.cc" + "strings/internal/str_format/float_conversion.h" + "strings/internal/str_format/output.cc" + "strings/internal/str_format/output.h" + "strings/internal/str_format/parser.cc" + "strings/internal/str_format/parser.h" + "strings/internal/str_join_internal.h" + "strings/internal/str_split_internal.h" + "strings/internal/utf8.cc" + "strings/internal/utf8.h" + "synchronization/barrier.cc" + "synchronization/barrier.h" + "synchronization/blocking_counter.cc" + "synchronization/blocking_counter.h" + "synchronization/mutex.cc" + "synchronization/mutex.h" + "synchronization/notification.cc" + "synchronization/notification.h" + "synchronization/internal/create_thread_identity.cc" + "synchronization/internal/create_thread_identity.h" + "synchronization/internal/futex.h" + "synchronization/internal/futex_waiter.h" + "synchronization/internal/futex_waiter.cc" + "synchronization/internal/graphcycles.cc" + "synchronization/internal/graphcycles.h" + "synchronization/internal/kernel_timeout.h" + "synchronization/internal/kernel_timeout.cc" + "synchronization/internal/per_thread_sem.cc" + "synchronization/internal/per_thread_sem.h" + "synchronization/internal/pthread_waiter.h" + "synchronization/internal/pthread_waiter.cc" + "synchronization/internal/sem_waiter.h" + "synchronization/internal/sem_waiter.cc" + "synchronization/internal/stdcpp_waiter.h" + "synchronization/internal/stdcpp_waiter.cc" + "synchronization/internal/thread_pool.h" + "synchronization/internal/waiter.h" + "synchronization/internal/waiter_base.h" + "synchronization/internal/waiter_base.cc" + "synchronization/internal/win32_waiter.h" + "synchronization/internal/win32_waiter.cc" + "time/civil_time.cc" + "time/civil_time.h" + "time/clock.cc" + "time/clock.h" + "time/duration.cc" + "time/format.cc" + "time/time.cc" + "time/time.h" + "time/internal/cctz/include/cctz/civil_time.h" + "time/internal/cctz/include/cctz/civil_time_detail.h" + "time/internal/cctz/include/cctz/time_zone.h" + "time/internal/cctz/include/cctz/zone_info_source.h" + "time/internal/cctz/src/civil_time_detail.cc" + "time/internal/cctz/src/time_zone_fixed.cc" + "time/internal/cctz/src/time_zone_fixed.h" + "time/internal/cctz/src/time_zone_format.cc" + "time/internal/cctz/src/time_zone_if.cc" + "time/internal/cctz/src/time_zone_if.h" + "time/internal/cctz/src/time_zone_impl.cc" + "time/internal/cctz/src/time_zone_impl.h" + "time/internal/cctz/src/time_zone_info.cc" + "time/internal/cctz/src/time_zone_info.h" + "time/internal/cctz/src/time_zone_libc.cc" + 
"time/internal/cctz/src/time_zone_libc.h" + "time/internal/cctz/src/time_zone_lookup.cc" + "time/internal/cctz/src/time_zone_posix.cc" + "time/internal/cctz/src/time_zone_posix.h" + "time/internal/cctz/src/tzfile.h" + "time/internal/cctz/src/zone_info_source.cc" + "types/any.h" + "types/bad_any_cast.cc" + "types/bad_any_cast.h" + "types/bad_optional_access.cc" + "types/bad_optional_access.h" + "types/bad_variant_access.cc" + "types/bad_variant_access.h" + "types/compare.h" + "types/internal/variant.h" + "types/optional.h" + "types/internal/optional.h" + "types/span.h" + "types/internal/span.h" + "types/variant.h" + "utility/internal/if_constexpr.h" + "utility/utility.h" + "debugging/leak_check.cc" +) + +set(ABSL_INTERNAL_DLL_TARGETS + "absl_check" + "absl_log" + "algorithm" + "algorithm_container" + "any" + "any_invocable" + "atomic_hook" + "bad_any_cast" + "bad_any_cast_impl" + "bad_optional_access" + "bad_variant_access" + "base" + "base_internal" + "bind_front" + "bits" + "btree" + "check" + "city" + "civil_time" + "compare" + "compressed_tuple" + "config" + "container" + "container_common" + "container_memory" + "cord" + "cord_internal" + "cordz_functions" + "cordz_handle" + "cordz_info" + "cordz_sample_token" + "core_headers" + "counting_allocator" + "crc_cord_state" + "crc_cpu_detect" + "crc_internal" + "crc32c" + "debugging" + "debugging_internal" + "demangle_internal" + "die_if_null" + "dynamic_annotations" + "endian" + "examine_stack" + "exponential_biased" + "failure_signal_handler" + "fixed_array" + "flat_hash_map" + "flat_hash_set" + "function_ref" + "graphcycles_internal" + "hash" + "hash_function_defaults" + "hash_policy_traits" + "hashtable_debug" + "hashtable_debug_hooks" + "hashtablez_sampler" + "inlined_vector" + "inlined_vector_internal" + "int128" + "kernel_timeout_internal" + "layout" + "leak_check" + "log_internal_check_impl" + "log_internal_check_op" + "log_internal_conditions" + "log_internal_config" + "log_internal_format" + "log_internal_globals" + "log_internal_log_impl" + "log_internal_proto" + "log_internal_message" + "log_internal_log_sink_set" + "log_internal_nullguard" + "log_internal_nullstream" + "log_internal_strip" + "log_internal_voidify" + "log_internal_append_truncated" + "log_globals" + "log_initialize" + "log" + "log_entry" + "log_sink" + "log_sink_registry" + "log_streamer" + "log_internal_structured" + "log_severity" + "log_structured" + "low_level_hash" + "malloc_internal" + "memory" + "meta" + "node_hash_map" + "node_hash_set" + "node_slot_policy" + "non_temporal_arm_intrinsics" + "non_temporal_memcpy" + "numeric" + "optional" + "periodic_sampler" + "pow10_helper" + "pretty_function" + "random_bit_gen_ref" + "random_distributions" + "random_internal_distribution_caller" + "random_internal_distributions" + "random_internal_explicit_seed_seq" + "random_internal_fastmath" + "random_internal_fast_uniform_bits" + "random_internal_generate_real" + "random_internal_iostream_state_saver" + "random_internal_nonsecure_base" + "random_internal_pcg_engine" + "random_internal_platform" + "random_internal_pool_urbg" + "random_internal_randen" + "random_internal_randen_engine" + "random_internal_randen_hwaes" + "random_internal_randen_hwaes_impl" + "random_internal_randen_slow" + "random_internal_salted_seed_seq" + "random_internal_seed_material" + "random_internal_sequence_urbg" + "random_internal_traits" + "random_internal_uniform_helper" + "random_internal_wide_multiply" + "random_random" + "random_seed_gen_exception" + "random_seed_sequences" + 
"raw_hash_map" + "raw_hash_set" + "raw_logging_internal" + "sample_recorder" + "scoped_set_env" + "span" + "spinlock_wait" + "spy_hash_state" + "stack_consumption" + "stacktrace" + "status" + "statusor" + "str_format" + "str_format_internal" + "strerror" + "strings" + "strings_internal" + "symbolize" + "synchronization" + "thread_pool" + "throw_delegate" + "time" + "time_zone" + "tracked" + "type_traits" + "utility" + "variant" +) + +set(ABSL_INTERNAL_TEST_DLL_FILES + "hash/hash_testing.h" + "log/scoped_mock_log.cc" + "log/scoped_mock_log.h" + "random/internal/chi_square.cc" + "random/internal/chi_square.h" + "random/internal/distribution_test_util.cc" + "random/internal/distribution_test_util.h" + "random/internal/mock_helpers.h" + "random/internal/mock_overload_set.h" + "random/mocking_bit_gen.h" + "random/mock_distributions.h" + "strings/cordz_test_helpers.h" + "strings/cord_test_helpers.h" +) + +set(ABSL_INTERNAL_TEST_DLL_TARGETS + "cord_test_helpers" + "cordz_test_helpers" + "hash_testing" + "random_mocking_bit_gen" + "random_internal_distribution_test_util" + "random_internal_mock_overload_set" + "scoped_mock_log" +) + +include(CheckCXXSourceCompiles) + +check_cxx_source_compiles( + [==[ +#ifdef _MSC_VER +# if _MSVC_LANG < 201703L +# error "The compiler defaults or is configured for C++ < 17" +# endif +#elif __cplusplus < 201703L +# error "The compiler defaults or is configured for C++ < 17" +#endif +int main() { return 0; } +]==] + ABSL_INTERNAL_AT_LEAST_CXX17) + +check_cxx_source_compiles( + [==[ +#ifdef _MSC_VER +# if _MSVC_LANG < 202002L +# error "The compiler defaults or is configured for C++ < 20" +# endif +#elif __cplusplus < 202002L +# error "The compiler defaults or is configured for C++ < 20" +#endif +int main() { return 0; } +]==] + ABSL_INTERNAL_AT_LEAST_CXX20) + +if(ABSL_INTERNAL_AT_LEAST_CXX20) + set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_20) +elseif(ABSL_INTERNAL_AT_LEAST_CXX17) + set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_17) +else() + set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_14) +endif() + +function(absl_internal_dll_contains) + cmake_parse_arguments(ABSL_INTERNAL_DLL + "" + "OUTPUT;TARGET" + "" + ${ARGN} + ) + + STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_DLL_TARGET}) + + list(FIND + ABSL_INTERNAL_DLL_TARGETS + "${_target}" + _index) + + if (${_index} GREATER -1) + set(${ABSL_INTERNAL_DLL_OUTPUT} 1 PARENT_SCOPE) + else() + set(${ABSL_INTERNAL_DLL_OUTPUT} 0 PARENT_SCOPE) + endif() +endfunction() + +function(absl_internal_test_dll_contains) + cmake_parse_arguments(ABSL_INTERNAL_TEST_DLL + "" + "OUTPUT;TARGET" + "" + ${ARGN} + ) + + STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_TEST_DLL_TARGET}) + + list(FIND + ABSL_INTERNAL_TEST_DLL_TARGETS + "${_target}" + _index) + + if (${_index} GREATER -1) + set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 1 PARENT_SCOPE) + else() + set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 0 PARENT_SCOPE) + endif() +endfunction() + +function(absl_internal_dll_targets) + cmake_parse_arguments(ABSL_INTERNAL_DLL + "" + "OUTPUT" + "DEPS" + ${ARGN} + ) + + set(_deps "") + foreach(dep IN LISTS ABSL_INTERNAL_DLL_DEPS) + absl_internal_dll_contains(TARGET ${dep} OUTPUT _dll_contains) + absl_internal_test_dll_contains(TARGET ${dep} OUTPUT _test_dll_contains) + if (_dll_contains) + list(APPEND _deps abseil_dll) + elseif (_test_dll_contains) + list(APPEND _deps abseil_test_dll) + else() + list(APPEND _deps ${dep}) + endif() + endforeach() + + # Because we may have added the DLL multiple times + list(REMOVE_DUPLICATES _deps) + 
set(${ABSL_INTERNAL_DLL_OUTPUT} "${_deps}" PARENT_SCOPE)
+endfunction()
+
+function(absl_make_dll)
+  cmake_parse_arguments(ABSL_INTERNAL_MAKE_DLL
+    ""
+    "TEST"
+    ""
+    ${ARGN}
+  )
+
+  if (ABSL_INTERNAL_MAKE_DLL_TEST)
+    set(_dll "abseil_test_dll")
+    set(_dll_files ${ABSL_INTERNAL_TEST_DLL_FILES})
+    set(_dll_libs "abseil_dll" "GTest::gtest" "GTest::gmock")
+    set(_dll_compile_definitions "GTEST_LINKED_AS_SHARED_LIBRARY=1")
+    set(_dll_includes ${absl_gtest_src_dir}/googletest/include ${absl_gtest_src_dir}/googlemock/include)
+    set(_dll_consume "ABSL_CONSUME_TEST_DLL")
+    set(_dll_build "ABSL_BUILD_TEST_DLL")
+  else()
+    set(_dll "abseil_dll")
+    set(_dll_files ${ABSL_INTERNAL_DLL_FILES})
+    set(_dll_libs "")
+    set(_dll_compile_definitions "")
+    set(_dll_includes "")
+    set(_dll_consume "ABSL_CONSUME_DLL")
+    set(_dll_build "ABSL_BUILD_DLL")
+  endif()
+
+  add_library(
+    ${_dll}
+    SHARED
+      ${_dll_files}
+  )
+  target_link_libraries(
+    ${_dll}
+    PRIVATE
+      ${_dll_libs}
+      ${ABSL_DEFAULT_LINKOPTS}
+  )
+  set_property(TARGET ${_dll} PROPERTY LINKER_LANGUAGE "CXX")
+  target_include_directories(
+    ${_dll}
+    PUBLIC
+      "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
+      $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+    PRIVATE
+      ${_dll_includes}
+  )
+
+  target_compile_options(
+    ${_dll}
+    PRIVATE
+      ${ABSL_DEFAULT_COPTS}
+  )
+
+  foreach(cflag ${ABSL_CC_LIB_COPTS})
+    if(${cflag} MATCHES "^(-Wno|/wd)")
+      # These flags are needed to suppress warnings that might fire in our headers.
+      set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+    elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
+      # Don't impose our warnings on others.
+    else()
+      set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+    endif()
+  endforeach()
+  string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}")
+
+  FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/${_dll}.pc" CONTENT "\
+prefix=${CMAKE_INSTALL_PREFIX}\n\
+exec_prefix=\${prefix}\n\
+libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\
+includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\
+\n\
+Name: ${_dll}\n\
+Description: Abseil DLL library\n\
+URL: https://abseil.io/\n\
+Version: ${absl_VERSION}\n\
+Libs: -L\${libdir} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-l${_dll}> ${PC_LINKOPTS}\n\
+Cflags: -I\${includedir}${PC_CFLAGS}\n")
+  INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/${_dll}.pc"
+          DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+
+  target_compile_definitions(
+    ${_dll}
+    PUBLIC
+      ${_dll_compile_definitions}
+    PRIVATE
+      ${_dll_build}
+      NOMINMAX
+    INTERFACE
+      ${ABSL_CC_LIB_DEFINES}
+      ${_dll_consume}
+  )
+
+  if(ABSL_PROPAGATE_CXX_STD)
+    # Abseil libraries require C++14 as the current minimum standard. When
+    # compiled with a higher standard (either because it is the compiler's
+    # default or explicitly requested), then Abseil requires that standard.
+    target_compile_features(${_dll} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
+  endif()
+
+  install(TARGETS ${_dll} EXPORT ${PROJECT_NAME}Targets
+        RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+        ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  )
+
+  add_library(absl::${_dll} ALIAS ${_dll})
+endfunction()
diff --git a/third_party/absl/CMake/AbseilHelpers.cmake b/third_party/absl/CMake/AbseilHelpers.cmake
new file mode 100644
index 000000000..c53b3584d
--- /dev/null
+++ b/third_party/absl/CMake/AbseilHelpers.cmake
@@ -0,0 +1,445 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include(CMakeParseArguments) +include(AbseilConfigureCopts) +include(AbseilDll) + +# The IDE folder for Abseil that will be used if Abseil is included in a CMake +# project that sets +# set_property(GLOBAL PROPERTY USE_FOLDERS ON) +# For example, Visual Studio supports folders. +if(NOT DEFINED ABSL_IDE_FOLDER) + set(ABSL_IDE_FOLDER Abseil) +endif() + +if(ABSL_USE_SYSTEM_INCLUDES) + set(ABSL_INTERNAL_INCLUDE_WARNING_GUARD SYSTEM) +else() + set(ABSL_INTERNAL_INCLUDE_WARNING_GUARD "") +endif() + +# absl_cc_library() +# +# CMake function to imitate Bazel's cc_library rule. +# +# Parameters: +# NAME: name of target (see Note) +# HDRS: List of public header files for the library +# SRCS: List of source files for the library +# DEPS: List of other libraries to be linked in to the binary targets +# COPTS: List of private compile options +# DEFINES: List of public defines +# LINKOPTS: List of link options +# PUBLIC: Add this so that this library will be exported under absl:: +# Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal. +# TESTONLY: When added, this target will only be built if both +# BUILD_TESTING=ON and ABSL_BUILD_TESTING=ON. +# +# Note: +# By default, absl_cc_library will always create a library named absl_${NAME}, +# and alias target absl::${NAME}. The absl:: form should always be used. +# This is to reduce namespace pollution. +# +# absl_cc_library( +# NAME +# awesome +# HDRS +# "a.h" +# SRCS +# "a.cc" +# ) +# absl_cc_library( +# NAME +# fantastic_lib +# SRCS +# "b.cc" +# DEPS +# absl::awesome # not "awesome" ! +# PUBLIC +# ) +# +# absl_cc_library( +# NAME +# main_lib +# ... +# DEPS +# absl::fantastic_lib +# ) +# +# TODO(b/320467376): Implement "ALWAYSLINK". +function(absl_cc_library) + cmake_parse_arguments(ABSL_CC_LIB + "DISABLE_INSTALL;PUBLIC;TESTONLY" + "NAME" + "HDRS;SRCS;COPTS;DEFINES;LINKOPTS;DEPS" + ${ARGN} + ) + + if(ABSL_CC_LIB_TESTONLY AND + NOT ((BUILD_TESTING AND ABSL_BUILD_TESTING) OR + (ABSL_BUILD_TEST_HELPERS AND ABSL_CC_LIB_PUBLIC))) + return() + endif() + + if(ABSL_ENABLE_INSTALL) + set(_NAME "${ABSL_CC_LIB_NAME}") + else() + set(_NAME "absl_${ABSL_CC_LIB_NAME}") + endif() + + # Check if this is a header-only library + # Note that as of February 2019, many popular OS's (for example, Ubuntu + # 16.04 LTS) only come with cmake 3.5 by default. For this reason, we can't + # use list(FILTER...) + set(ABSL_CC_SRCS "${ABSL_CC_LIB_SRCS}") + foreach(src_file IN LISTS ABSL_CC_SRCS) + if(${src_file} MATCHES ".*\\.(h|inc)") + list(REMOVE_ITEM ABSL_CC_SRCS "${src_file}") + endif() + endforeach() + + if(ABSL_CC_SRCS STREQUAL "") + set(ABSL_CC_LIB_IS_INTERFACE 1) + else() + set(ABSL_CC_LIB_IS_INTERFACE 0) + endif() + + # Determine this build target's relationship to the DLL. It's one of four things: + # 1. "dll" -- This target is part of the DLL + # 2. "dll_dep" -- This target is not part of the DLL, but depends on the DLL. + # Note that we assume any target not in the DLL depends on the + # DLL. 
This is not a technical necessity but a convenience
+  #                which happens to be true, because nearly every target is
+  #                part of the DLL.
+  # 3. "shared" -- This is a shared library, perhaps on a non-windows platform
+  #                where DLL doesn't make sense.
+  # 4. "static" -- This target does not depend on the DLL and should be built
+  #                statically.
+  if (${ABSL_BUILD_DLL})
+    if(ABSL_ENABLE_INSTALL)
+      absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll)
+      absl_internal_test_dll_contains(TARGET ${_NAME} OUTPUT _in_test_dll)
+    else()
+      absl_internal_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_dll)
+      absl_internal_test_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_test_dll)
+    endif()
+    if (${_in_dll} OR ${_in_test_dll})
+      # This target should be replaced by the DLL
+      set(_build_type "dll")
+      set(ABSL_CC_LIB_IS_INTERFACE 1)
+    else()
+      # Building a DLL, but this target is not part of the DLL
+      set(_build_type "dll_dep")
+    endif()
+  elseif(BUILD_SHARED_LIBS)
+    set(_build_type "shared")
+  else()
+    set(_build_type "static")
+  endif()
+
+  # Generate a pkg-config file for every library:
+  if(ABSL_ENABLE_INSTALL)
+    if(absl_VERSION)
+      set(PC_VERSION "${absl_VERSION}")
+    else()
+      set(PC_VERSION "head")
+    endif()
+    if(NOT _build_type STREQUAL "dll")
+      set(LNK_LIB "${LNK_LIB} -labsl_${_NAME}")
+    endif()
+    foreach(dep ${ABSL_CC_LIB_DEPS})
+      if(${dep} MATCHES "^absl::(.*)")
+        # for DLL builds many libs are not created, but add
+        # the pkgconfigs nevertheless, pointing to the dll.
+        if(_build_type STREQUAL "dll")
+          # hide this MATCHES in an if-clause so it doesn't overwrite
+          # the CMAKE_MATCH_1 from (${dep} MATCHES "^absl::(.*)")
+          if(NOT PC_DEPS MATCHES "abseil_dll")
+            # Join deps with commas.
+            if(PC_DEPS)
+              set(PC_DEPS "${PC_DEPS},")
+            endif()
+            # don't duplicate dll-dep if it exists already
+            set(PC_DEPS "${PC_DEPS} abseil_dll = ${PC_VERSION}")
+            set(LNK_LIB "${LNK_LIB} -labseil_dll")
+          endif()
+        else()
+          # Join deps with commas.
+          if(PC_DEPS)
+            set(PC_DEPS "${PC_DEPS},")
+          endif()
+          set(PC_DEPS "${PC_DEPS} absl_${CMAKE_MATCH_1} = ${PC_VERSION}")
+        endif()
+      endif()
+    endforeach()
+    foreach(cflag ${ABSL_CC_LIB_COPTS})
+      if(${cflag} MATCHES "^(-Wno|/wd)")
+        # These flags are needed to suppress warnings that might fire in our headers.
+        set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+      elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
+        # Don't impose our warnings on others.
+      elseif(${cflag} MATCHES "^-m")
+        # Don't impose CPU instruction requirements on others, as
+        # the code performs feature detection at runtime.
+      else()
+        set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
+      endif()
+    endforeach()
+    string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}")
+    FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\
+prefix=${CMAKE_INSTALL_PREFIX}\n\
+exec_prefix=\${prefix}\n\
+libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\
+includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\
+\n\
+Name: absl_${_NAME}\n\
+Description: Abseil ${_NAME} library\n\
+URL: https://abseil.io/\n\
+Version: ${PC_VERSION}\n\
+Requires:${PC_DEPS}\n\
+Libs: -L\${libdir} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:${LNK_LIB}> ${PC_LINKOPTS}\n\
+Cflags: -I\${includedir}${PC_CFLAGS}\n")
+    INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc"
+            DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+  endif()
+
+  if(NOT ABSL_CC_LIB_IS_INTERFACE)
+    if(_build_type STREQUAL "dll_dep")
+      # This target depends on the DLL. When adding dependencies to this target,
+      # any depended-on-target which is contained inside the DLL is replaced
+      # with a dependency on the DLL.
+      add_library(${_NAME} STATIC "")
+      target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS})
+      absl_internal_dll_targets(
+        DEPS ${ABSL_CC_LIB_DEPS}
+        OUTPUT _dll_deps
+      )
+      target_link_libraries(${_NAME}
+        PUBLIC ${_dll_deps}
+        PRIVATE
+          ${ABSL_CC_LIB_LINKOPTS}
+          ${ABSL_DEFAULT_LINKOPTS}
+      )
+
+      if (ABSL_CC_LIB_TESTONLY)
+        set(_gtest_link_define "GTEST_LINKED_AS_SHARED_LIBRARY=1")
+      else()
+        set(_gtest_link_define)
+      endif()
+
+      target_compile_definitions(${_NAME}
+        PUBLIC
+          ABSL_CONSUME_DLL
+          "${_gtest_link_define}"
+      )
+
+    elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared")
+      add_library(${_NAME} "")
+      target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS})
+      target_link_libraries(${_NAME}
+        PUBLIC ${ABSL_CC_LIB_DEPS}
+        PRIVATE
+          ${ABSL_CC_LIB_LINKOPTS}
+          ${ABSL_DEFAULT_LINKOPTS}
+      )
+    else()
+      message(FATAL_ERROR "Invalid build type: ${_build_type}")
+    endif()
+
+    # Linker language can be inferred from sources, but in the case of DLLs we
+    # don't have any .cc files so it would be ambiguous. We could set it
+    # explicitly only in the case of DLLs but, because "CXX" is always the
+    # correct linker language for static or for shared libraries, we set it
+    # unconditionally.
+    set_property(TARGET ${_NAME} PROPERTY LINKER_LANGUAGE "CXX")
+
+    target_include_directories(${_NAME} ${ABSL_INTERNAL_INCLUDE_WARNING_GUARD}
+      PUBLIC
+        "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
+        $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+    )
+    target_compile_options(${_NAME}
+      PRIVATE ${ABSL_CC_LIB_COPTS})
+    target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES})
+
+    # Add all Abseil targets to a folder in the IDE for organization.
+    if(ABSL_CC_LIB_PUBLIC)
+      set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER})
+    elseif(ABSL_CC_LIB_TESTONLY)
+      set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test)
+    else()
+      set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/internal)
+    endif()
+
+    if(ABSL_PROPAGATE_CXX_STD)
+      # Abseil libraries require C++14 as the current minimum standard. When
+      # compiled with a higher standard (either because it is the compiler's
+      # default or explicitly requested), then Abseil requires that standard.
+      target_compile_features(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
+    endif()
+
+    # When being installed, we lose the absl_ prefix. We want to put it back
+    # to have properly named lib files. This is a no-op when we are not being
+    # installed.
+    if(ABSL_ENABLE_INSTALL)
+      set_target_properties(${_NAME} PROPERTIES
+        OUTPUT_NAME "absl_${_NAME}"
+        SOVERSION "2401.0.0"
+      )
+    endif()
+  else()
+    # Generating header-only library
+    add_library(${_NAME} INTERFACE)
+    target_include_directories(${_NAME} ${ABSL_INTERNAL_INCLUDE_WARNING_GUARD}
+      INTERFACE
+        "$<BUILD_INTERFACE:${ABSL_COMMON_INCLUDE_DIRS}>"
+        $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+      )
+
+    if (_build_type STREQUAL "dll")
+        set(ABSL_CC_LIB_DEPS abseil_dll)
+    endif()
+
+    target_link_libraries(${_NAME}
+      INTERFACE
+        ${ABSL_CC_LIB_DEPS}
+        ${ABSL_CC_LIB_LINKOPTS}
+        ${ABSL_DEFAULT_LINKOPTS}
+    )
+    target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES})
+
+    if(ABSL_PROPAGATE_CXX_STD)
+      # Abseil libraries require C++14 as the current minimum standard.
+      # Top-level application CMake projects should ensure a consistent C++
+      # standard for all compiled sources by setting CMAKE_CXX_STANDARD.
+ target_compile_features(${_NAME} INTERFACE ${ABSL_INTERNAL_CXX_STD_FEATURE}) + endif() + endif() + + if(ABSL_ENABLE_INSTALL) + install(TARGETS ${_NAME} EXPORT ${PROJECT_NAME}Targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + endif() + + add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME}) +endfunction() + +# absl_cc_test() +# +# CMake function to imitate Bazel's cc_test rule. +# +# Parameters: +# NAME: name of target (see Usage below) +# SRCS: List of source files for the binary +# DEPS: List of other libraries to be linked in to the binary targets +# COPTS: List of private compile options +# DEFINES: List of public defines +# LINKOPTS: List of link options +# +# Note: +# By default, absl_cc_test will always create a binary named absl_${NAME}. +# This will also add it to ctest list as absl_${NAME}. +# +# Usage: +# absl_cc_library( +# NAME +# awesome +# HDRS +# "a.h" +# SRCS +# "a.cc" +# PUBLIC +# ) +# +# absl_cc_test( +# NAME +# awesome_test +# SRCS +# "awesome_test.cc" +# DEPS +# absl::awesome +# GTest::gmock +# GTest::gtest_main +# ) +function(absl_cc_test) + if(NOT (BUILD_TESTING AND ABSL_BUILD_TESTING)) + return() + endif() + + cmake_parse_arguments(ABSL_CC_TEST + "" + "NAME" + "SRCS;COPTS;DEFINES;LINKOPTS;DEPS" + ${ARGN} + ) + + set(_NAME "absl_${ABSL_CC_TEST_NAME}") + + add_executable(${_NAME} "") + target_sources(${_NAME} PRIVATE ${ABSL_CC_TEST_SRCS}) + target_include_directories(${_NAME} + PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} + PRIVATE ${absl_gtest_src_dir}/googletest/include ${absl_gtest_src_dir}/googlemock/include + ) + + if (${ABSL_BUILD_DLL}) + target_compile_definitions(${_NAME} + PUBLIC + ${ABSL_CC_TEST_DEFINES} + ABSL_CONSUME_DLL + ABSL_CONSUME_TEST_DLL + GTEST_LINKED_AS_SHARED_LIBRARY=1 + ) + + # Replace dependencies on targets inside the DLL with abseil_dll itself. + absl_internal_dll_targets( + DEPS ${ABSL_CC_TEST_DEPS} + OUTPUT ABSL_CC_TEST_DEPS + ) + absl_internal_dll_targets( + DEPS ${ABSL_CC_TEST_LINKOPTS} + OUTPUT ABSL_CC_TEST_LINKOPTS + ) + else() + target_compile_definitions(${_NAME} + PUBLIC + ${ABSL_CC_TEST_DEFINES} + ) + endif() + target_compile_options(${_NAME} + PRIVATE ${ABSL_CC_TEST_COPTS} + ) + + target_link_libraries(${_NAME} + PUBLIC ${ABSL_CC_TEST_DEPS} + PRIVATE ${ABSL_CC_TEST_LINKOPTS} + ) + # Add all Abseil targets to a folder in the IDE for organization. + set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test) + + if(ABSL_PROPAGATE_CXX_STD) + # Abseil libraries require C++14 as the current minimum standard. + # Top-level application CMake projects should ensure a consistent C++ + # standard for all compiled sources by setting CMAKE_CXX_STANDARD. 
+    target_compile_features(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
+  endif()
+
+  add_test(NAME ${_NAME} COMMAND ${_NAME})
+endfunction()
diff --git a/third_party/absl/CMake/Googletest/CMakeLists.txt.in b/third_party/absl/CMake/Googletest/CMakeLists.txt.in
new file mode 100644
index 000000000..75691b111
--- /dev/null
+++ b/third_party/absl/CMake/Googletest/CMakeLists.txt.in
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.10)
+
+project(googletest-external NONE)
+
+include(ExternalProject)
+ExternalProject_Add(googletest
+  URL "${absl_gtest_download_url}"  # May be empty
+  SOURCE_DIR "${absl_gtest_src_dir}"
+  BINARY_DIR "${absl_gtest_build_dir}"
+  CONFIGURE_COMMAND ""
+  BUILD_COMMAND ""
+  INSTALL_COMMAND ""
+  TEST_COMMAND ""
+)
diff --git a/third_party/absl/CMake/Googletest/DownloadGTest.cmake b/third_party/absl/CMake/Googletest/DownloadGTest.cmake
new file mode 100644
index 000000000..9d071c917
--- /dev/null
+++ b/third_party/absl/CMake/Googletest/DownloadGTest.cmake
@@ -0,0 +1,41 @@
+# Integrates googletest at configure time.  Based on the instructions at
+# https://github.com/google/googletest/tree/master/googletest#incorporating-into-an-existing-cmake-project
+
+# Set up the external googletest project, downloading the latest from Github
+# master if requested.
+configure_file(
+  ${CMAKE_CURRENT_LIST_DIR}/CMakeLists.txt.in
+  ${CMAKE_BINARY_DIR}/googletest-external/CMakeLists.txt
+)
+
+set(ABSL_SAVE_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+set(ABSL_SAVE_CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
+if (BUILD_SHARED_LIBS)
+  set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGTEST_CREATE_SHARED_LIBRARY=1")
+endif()
+
+# Configure and build the googletest source.
+execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
+  RESULT_VARIABLE result
+  WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-external )
+if(result)
+  message(FATAL_ERROR "CMake step for googletest failed: ${result}")
+endif()
+
+execute_process(COMMAND ${CMAKE_COMMAND} --build .
+  RESULT_VARIABLE result
+  WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-external)
+if(result)
+  message(FATAL_ERROR "Build step for googletest failed: ${result}")
+endif()
+
+set(CMAKE_CXX_FLAGS ${ABSL_SAVE_CMAKE_CXX_FLAGS})
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${ABSL_SAVE_CMAKE_RUNTIME_OUTPUT_DIRECTORY})
+
+# Prevent overriding the parent project's compiler/linker settings on Windows
+set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+
+# Add googletest directly to our build. This defines the gtest and gtest_main
+# targets.
+add_subdirectory(${absl_gtest_src_dir} ${absl_gtest_build_dir} EXCLUDE_FROM_ALL)
diff --git a/third_party/absl/CMake/README.md b/third_party/absl/CMake/README.md
new file mode 100644
index 000000000..c7ddee644
--- /dev/null
+++ b/third_party/absl/CMake/README.md
@@ -0,0 +1,188 @@
+# Abseil CMake Build Instructions
+
+Abseil comes with a CMake build script ([CMakeLists.txt](../CMakeLists.txt))
+that can be used on a wide range of platforms ("C" stands for cross-platform).
+If you don't have CMake installed already, you can download it for free from
+https://www.cmake.org/.
+
+CMake works by generating native makefiles or build projects that can
+be used in the compiler environment of your choice.
+
+For API/ABI compatibility reasons, we strongly recommend building Abseil in a
+subdirectory of your project or as an embedded dependency.
+
+## Incorporating Abseil Into a CMake Project
+
+The recommendations below are similar to those for using CMake within the
+googletest framework
+(https://github.com/google/googletest/tree/master/googletest#incorporating-into-an-existing-cmake-project)
+
+### Step-by-Step Instructions
+
+1. If you want to build the Abseil tests, integrate the Abseil dependency
+[Google Test](https://github.com/google/googletest) into your CMake
+project. To disable Abseil tests, you have to pass either
+`-DBUILD_TESTING=OFF` or `-DABSL_BUILD_TESTING=OFF` when configuring your
+project with CMake.
+
+2. Download Abseil and copy it into a subdirectory in your CMake project or add
+Abseil as a [git submodule](https://git-scm.com/docs/git-submodule) in your
+CMake project.
+
+3. You can then use the CMake command
+[`add_subdirectory()`](https://cmake.org/cmake/help/latest/command/add_subdirectory.html)
+to include Abseil directly in your CMake project.
+
+4. Add the **absl::** target you wish to use to the
+[`target_link_libraries()`](https://cmake.org/cmake/help/latest/command/target_link_libraries.html)
+section of your executable or of your library.
+Here is a short CMakeLists.txt example of an application project using Abseil. + +```cmake +cmake_minimum_required(VERSION 3.10) +project(my_app_project) + +# Pick the C++ standard to compile with. +# Abseil currently supports C++14, C++17, and C++20. +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +add_subdirectory(abseil-cpp) + +add_executable(my_exe source.cpp) +target_link_libraries(my_exe absl::base absl::synchronization absl::strings) +``` + +Note that if you are developing a library designed for use by other clients, you +should instead leave `CMAKE_CXX_STANDARD` unset (or only set if being built as +the current top-level CMake project) and configure the minimum required C++ +standard at the target level. If you require a later minimum C++ standard than +Abseil does, it's a good idea to also enforce that `CMAKE_CXX_STANDARD` (which +will control Abseil library targets) is set to at least that minimum. For +example: + +```cmake +cmake_minimum_required(VERSION 3.10) +project(my_lib_project) + +# Leave C++ standard up to the root application, so set it only if this is the +# current top-level CMake project. +if(CMAKE_SOURCE_DIR STREQUAL my_lib_project_SOURCE_DIR) + set(CMAKE_CXX_STANDARD 17) + set(CMAKE_CXX_STANDARD_REQUIRED ON) +endif() + +add_subdirectory(abseil-cpp) + +add_library(my_lib source.cpp) +target_link_libraries(my_lib absl::base absl::synchronization absl::strings) + +# Enforce that my_lib requires C++17. Important to document for clients that they +# must set CMAKE_CXX_STANDARD to 17 or higher for proper Abseil ABI compatibility +# (since otherwise, Abseil library targets could be compiled with a lower C++ +# standard than my_lib). +target_compile_features(my_lib PUBLIC cxx_std_17) +if(CMAKE_CXX_STANDARD LESS 17) + message(FATAL_ERROR + "my_lib_project requires CMAKE_CXX_STANDARD >= 17 (got: ${CMAKE_CXX_STANDARD})") +endif() +``` + +Then the top-level application project that uses your library is responsible for +setting a consistent `CMAKE_CXX_STANDARD` that is sufficiently high. + +### Running Abseil Tests with CMake + +Use the `-DABSL_BUILD_TESTING=ON` flag to run Abseil tests. Note that +BUILD_TESTING must also be on (the default). + +You will need to provide Abseil with a Googletest dependency. There are two +options for how to do this: + +* Use `-DABSL_USE_GOOGLETEST_HEAD`. This will automatically download the latest +Googletest source into the build directory at configure time. Googletest will +then be compiled directly alongside Abseil's tests. +* Manually integrate Googletest with your build. See +https://github.com/google/googletest/blob/master/googletest/README.md#using-cmake +for more information on using Googletest in a CMake project. + +For example, to run just the Abseil tests, you could use this script: + +``` +cd path/to/abseil-cpp +mkdir build +cd build +cmake -DABSL_BUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON .. +make -j +ctest +``` + +Currently, we only run our tests with CMake in a Linux environment, but we are +working on the rest of our supported platforms. See +https://github.com/abseil/abseil-cpp/projects/1 and +https://github.com/abseil/abseil-cpp/issues/109 for more information. 
+
+### Available Abseil CMake Public Targets
+
+Here's a non-exhaustive list of Abseil CMake public targets:
+
+```cmake
+absl::algorithm
+absl::base
+absl::debugging
+absl::flat_hash_map
+absl::flags
+absl::memory
+absl::meta
+absl::numeric
+absl::random_random
+absl::strings
+absl::synchronization
+absl::time
+absl::utility
+```
+
+## Traditional CMake Set-Up
+
+For larger projects, it may make sense to use the traditional CMake set-up where you build and install projects separately.
+
+First, you'd need to build and install Google Test:
+```
+cmake -S /source/googletest -B /temporary/build/googletest -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/installation/dir -DBUILD_GMOCK=ON
+cmake --build /temporary/build/googletest --target install
+```
+
+Then you need to configure and build Abseil. Make sure you enable `ABSL_USE_EXTERNAL_GOOGLETEST` and `ABSL_FIND_GOOGLETEST`. You also need to enable `ABSL_ENABLE_INSTALL` so that you can install Abseil itself.
+```
+cmake -S /source/abseil-cpp -B /temporary/build/abseil-cpp -DCMAKE_PREFIX_PATH=/installation/dir -DCMAKE_INSTALL_PREFIX=/installation/dir -DABSL_ENABLE_INSTALL=ON -DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON
+cmake --build /temporary/build/abseil-cpp
+```
+
+(`CMAKE_PREFIX_PATH` is where you already have Google Test installed; `CMAKE_INSTALL_PREFIX` is where you want to have Abseil installed; they can be different.)
+
+Run the tests:
+```
+ctest --test-dir /temporary/build/abseil-cpp
+```
+
+And finally install:
+```
+cmake --build /temporary/build/abseil-cpp --target install
+```
+
+# CMake Option Synopsis
+
+## Enable Standard CMake Installation
+
+`-DABSL_ENABLE_INSTALL=ON`
+
+## Google Test Options
+
+`-DABSL_BUILD_TESTING=ON` must be set to enable testing
+
+- Have Abseil download and build Google Test for you: `-DABSL_USE_EXTERNAL_GOOGLETEST=OFF` (default)
+  - Download and build latest Google Test: `-DABSL_USE_GOOGLETEST_HEAD=ON`
+  - Download specific Google Test version (ZIP archive): `-DABSL_GOOGLETEST_DOWNLOAD_URL=https://.../version.zip`
+  - Use Google Test from specific local directory: `-DABSL_LOCAL_GOOGLETEST_DIR=/path/to/googletest`
+- Use Google Test included elsewhere in your project: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON`
+- Use standard CMake `find_package(GTest)` to find installed Google Test: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON`
diff --git a/third_party/absl/CMake/abslConfig.cmake.in b/third_party/absl/CMake/abslConfig.cmake.in
new file mode 100644
index 000000000..62d246d01
--- /dev/null
+++ b/third_party/absl/CMake/abslConfig.cmake.in
@@ -0,0 +1,8 @@
+# absl CMake configuration file.
+
+include(CMakeFindDependencyMacro)
+find_dependency(Threads)
+
+@PACKAGE_INIT@
+
+include ("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake")
diff --git a/third_party/absl/CMake/install_test_project/CMakeLists.txt b/third_party/absl/CMake/install_test_project/CMakeLists.txt
new file mode 100644
index 000000000..30c23b2c6
--- /dev/null
+++ b/third_party/absl/CMake/install_test_project/CMakeLists.txt
@@ -0,0 +1,25 @@
+#
+# Copyright 2019 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A simple CMakeLists.txt for testing cmake installation
+
+cmake_minimum_required(VERSION 3.10)
+project(absl_cmake_testing CXX)
+
+add_executable(simple simple.cc)
+
+find_package(absl REQUIRED)
+
+target_link_libraries(simple absl::strings absl::config)
diff --git a/third_party/absl/CMake/install_test_project/simple.cc b/third_party/absl/CMake/install_test_project/simple.cc
new file mode 100644
index 000000000..7daa7f090
--- /dev/null
+++ b/third_party/absl/CMake/install_test_project/simple.cc
@@ -0,0 +1,32 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <iostream>
+#include "absl/base/config.h"
+#include "absl/strings/substitute.h"
+
+#if !defined(ABSL_LTS_RELEASE_VERSION) || ABSL_LTS_RELEASE_VERSION != 99998877
+#error ABSL_LTS_RELEASE_VERSION is not set correctly.
+#endif
+
+#if !defined(ABSL_LTS_RELEASE_PATCH_LEVEL) || ABSL_LTS_RELEASE_PATCH_LEVEL != 0
+#error ABSL_LTS_RELEASE_PATCH_LEVEL is not set correctly.
+#endif
+
+int main(int argc, char** argv) {
+  for (int i = 0; i < argc; ++i) {
+    std::cout << absl::Substitute("Arg $0: $1\n", i, argv[i]);
+  }
+}
diff --git a/third_party/absl/CMake/install_test_project/test.sh b/third_party/absl/CMake/install_test_project/test.sh
new file mode 100755
index 000000000..cc028bac8
--- /dev/null
+++ b/third_party/absl/CMake/install_test_project/test.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+#
+# Copyright 2019 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Unit and integration tests for Abseil LTS CMake installation
+
+# Fail on any error. Treat unset variables as an error. Print commands as executed.
+set -euox pipefail
+
+absl_dir=/abseil-cpp
+absl_build_dir=/buildfs
+googletest_builddir=/googletest_builddir
+project_dir="${absl_dir}"/CMake/install_test_project
+project_build_dir=/buildfs/project-build
+
+build_shared_libs="OFF"
+if [ "${LINK_TYPE:-}" = "DYNAMIC" ]; then
+  build_shared_libs="ON"
+fi
+
+# Build and install GoogleTest
+mkdir "${googletest_builddir}"
+pushd "${googletest_builddir}"
+curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${ABSL_GOOGLETEST_COMMIT}".zip
+unzip "${ABSL_GOOGLETEST_COMMIT}".zip
+pushd "googletest-${ABSL_GOOGLETEST_COMMIT}"
+mkdir build
+pushd build
+cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS="${build_shared_libs}" ..
+make -j $(nproc)
+make install
+ldconfig
+popd
+popd
+popd
+
+# Run the LTS transformations
+./create_lts.py 99998877
+
+# Build and install Abseil
+pushd "${absl_build_dir}"
+cmake "${absl_dir}" \
+  -DABSL_USE_EXTERNAL_GOOGLETEST=ON \
+  -DABSL_FIND_GOOGLETEST=ON \
+  -DCMAKE_BUILD_TYPE=Release \
+  -DABSL_BUILD_TESTING=ON \
+  -DBUILD_SHARED_LIBS="${build_shared_libs}"
+make -j $(nproc)
+ctest -j $(nproc) --output-on-failure
+make install
+ldconfig
+popd
+
+# Test the project against the installed Abseil
+mkdir -p "${project_build_dir}"
+pushd "${project_build_dir}"
+cmake "${project_dir}"
+cmake --build . --target simple
+
+output="$(${project_build_dir}/simple "printme" 2>&1)"
+if [[ "${output}" != *"Arg 1: printme"* ]]; then
+  echo "Faulty output on simple project:"
+  echo "${output}"
+  exit 1
+fi
+
+popd
+
+if ! grep absl::strings "/usr/local/lib/cmake/absl/abslTargets.cmake"; then
+  cat "/usr/local/lib/cmake/absl/abslTargets.cmake"
+  echo "CMake targets named incorrectly"
+  exit 1
+fi
+
+pushd "${HOME}"
+cat > hello-abseil.cc << EOF
+#include <cstdlib>
+
+#include "absl/strings/str_format.h"
+
+int main(int argc, char **argv) {
+  absl::PrintF("Hello Abseil!\n");
+  return EXIT_SUCCESS;
+}
+EOF
+
+if [ "${LINK_TYPE:-}" != "DYNAMIC" ]; then
+  pc_args=($(pkg-config --cflags --libs --static absl_str_format))
+  g++ -static -o hello-abseil hello-abseil.cc "${pc_args[@]}"
+else
+  pc_args=($(pkg-config --cflags --libs absl_str_format))
+  g++ -o hello-abseil hello-abseil.cc "${pc_args[@]}"
+fi
+hello="$(./hello-abseil)"
+[[ "${hello}" == "Hello Abseil!" ]]
+
+popd
+
+echo "Install test complete!"
+exit 0
diff --git a/third_party/absl/CMakeLists.txt b/third_party/absl/CMakeLists.txt
new file mode 100644
index 000000000..194f87086
--- /dev/null
+++ b/third_party/absl/CMakeLists.txt
@@ -0,0 +1,270 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md
+# As of 2022-09-06, CMake 3.10 is the minimum supported version.
+cmake_minimum_required(VERSION 3.10)
+
+# Compiler id for Apple Clang is now AppleClang.
+if (POLICY CMP0025) + cmake_policy(SET CMP0025 NEW) +endif (POLICY CMP0025) + +# if command can use IN_LIST +if (POLICY CMP0057) + cmake_policy(SET CMP0057 NEW) +endif (POLICY CMP0057) + +# Project version variables are the empty string if version is unspecified +if (POLICY CMP0048) + cmake_policy(SET CMP0048 NEW) +endif (POLICY CMP0048) + +# Honor the GTest_ROOT variable if specified +if (POLICY CMP0074) + cmake_policy(SET CMP0074 NEW) +endif (POLICY CMP0074) + +# option() honor variables +if (POLICY CMP0077) + cmake_policy(SET CMP0077 NEW) +endif (POLICY CMP0077) + +# Allow the user to specify the MSVC runtime +if (POLICY CMP0091) + cmake_policy(SET CMP0091 NEW) +endif (POLICY CMP0091) + +# try_compile() honors the CMAKE_CXX_STANDARD value +if (POLICY CMP0067) + cmake_policy(SET CMP0067 NEW) +endif (POLICY CMP0067) + +# Allow the user to specify the CMAKE_MSVC_DEBUG_INFORMATION_FORMAT +if (POLICY CMP0141) + cmake_policy(SET CMP0141 NEW) +endif (POLICY CMP0141) + +project(absl LANGUAGES CXX VERSION 20240116) +include(CTest) + +# Output directory is correct by default for most build setups. However, when +# building Abseil as a DLL, it is important to have the DLL in the same +# directory as the executable using it. Thus, we put all executables in a single +# /bin directory. +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + +# when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp)) +# in the source tree of a project that uses it, install rules are disabled. +if(NOT CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) + option(ABSL_ENABLE_INSTALL "Enable install rule" OFF) +else() + option(ABSL_ENABLE_INSTALL "Enable install rule" ON) +endif() + +option(ABSL_PROPAGATE_CXX_STD + "Use CMake C++ standard meta features (e.g. cxx_std_14) that propagate to targets that link to Abseil" + OFF) # TODO: Default to ON for CMake 3.8 and greater. +if(NOT ABSL_PROPAGATE_CXX_STD) + message(WARNING "A future Abseil release will default ABSL_PROPAGATE_CXX_STD to ON for CMake 3.8 and up. We recommend enabling this option to ensure your project still builds correctly.") +endif() + +option(ABSL_USE_SYSTEM_INCLUDES + "Silence warnings in Abseil headers by marking them as SYSTEM includes" + OFF) + +list(APPEND CMAKE_MODULE_PATH + ${CMAKE_CURRENT_LIST_DIR}/CMake + ${CMAKE_CURRENT_LIST_DIR}/absl/copts +) + +include(CMakePackageConfigHelpers) +include(GNUInstallDirs) +include(AbseilDll) +include(AbseilHelpers) + + +## +## Using absl targets +## +## all public absl targets are +## exported with the absl:: prefix +## +## e.g absl::base absl::synchronization absl::strings .... +## +## DO NOT rely on the internal targets outside of the prefix + + +# include current path +list(APPEND ABSL_COMMON_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}) + +if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + set(ABSL_USING_CLANG ON) +else() + set(ABSL_USING_CLANG OFF) +endif() + +# find dependencies +## pthread +find_package(Threads REQUIRED) + +include(CMakeDependentOption) + +option(ABSL_BUILD_TESTING + "If ON, Abseil will build all of Abseil's own tests." OFF) + +option(ABSL_BUILD_TEST_HELPERS + "If ON, Abseil will build libraries that you can use to write tests against Abseil code. This option requires that Abseil is configured to use GoogleTest." + OFF) + +option(ABSL_USE_EXTERNAL_GOOGLETEST + "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subdirectory." 
OFF)
+
+cmake_dependent_option(ABSL_FIND_GOOGLETEST
+  "If ON, Abseil will use find_package(GTest) rather than assuming that GoogleTest is already provided by the including project."
+  ON
+  "ABSL_USE_EXTERNAL_GOOGLETEST"
+  OFF)
+
+
+option(ABSL_USE_GOOGLETEST_HEAD
+  "If ON, abseil will download HEAD from GoogleTest at config time." OFF)
+
+set(ABSL_GOOGLETEST_DOWNLOAD_URL "" CACHE STRING "If set, download GoogleTest from this URL")
+
+set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH
+  "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_DOWNLOAD_URL is not set, specifies the directory of a local GoogleTest checkout."
+  )
+
+if((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS)
+  if (ABSL_USE_EXTERNAL_GOOGLETEST)
+    if (ABSL_FIND_GOOGLETEST)
+      find_package(GTest REQUIRED)
+    elseif(NOT TARGET GTest::gtest)
+      if(TARGET gtest)
+        # When Google Test is included directly rather than through find_package, the aliases are missing.
+        add_library(GTest::gtest ALIAS gtest)
+        add_library(GTest::gtest_main ALIAS gtest_main)
+        add_library(GTest::gmock ALIAS gmock)
+        add_library(GTest::gmock_main ALIAS gmock_main)
+      else()
+        message(FATAL_ERROR "ABSL_USE_EXTERNAL_GOOGLETEST is ON and ABSL_FIND_GOOGLETEST is OFF, which means that the top-level project must build the Google Test project. However, the target gtest was not found.")
+      endif()
+    endif()
+  else()
+    set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build)
+    if(ABSL_USE_GOOGLETEST_HEAD AND ABSL_GOOGLETEST_DOWNLOAD_URL)
+      message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL")
+    endif()
+    if(ABSL_USE_GOOGLETEST_HEAD)
+      set(absl_gtest_download_url "https://github.com/google/googletest/archive/main.zip")
+    elseif(ABSL_GOOGLETEST_DOWNLOAD_URL)
+      set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL})
+    endif()
+    if(absl_gtest_download_url)
+      set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src)
+    else()
+      set(absl_gtest_src_dir ${ABSL_LOCAL_GOOGLETEST_DIR})
+    endif()
+    include(CMake/Googletest/DownloadGTest.cmake)
+  endif()
+endif()
+
+add_subdirectory(absl)
+
+if(ABSL_ENABLE_INSTALL)
+
+
+  # install as a subdirectory only
+  install(EXPORT ${PROJECT_NAME}Targets
+    NAMESPACE absl::
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+  )
+
+  configure_package_config_file(
+    CMake/abslConfig.cmake.in
+    "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
+    INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+  )
+  install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
+    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+  )
+
+  # Abseil only has a version in LTS releases. This mechanism is accomplished by
+  # Abseil's internal Copybara (https://github.com/google/copybara) workflows and
+  # isn't visible in the CMake buildsystem itself.
+  if(absl_VERSION)
+    write_basic_package_version_file(
+      "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
+      COMPATIBILITY ExactVersion
+    )
+
+    install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
+      DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+    )
+  endif()  # absl_VERSION
+
+  install(DIRECTORY absl
+    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+    FILES_MATCHING
+      PATTERN "*.inc"
+      PATTERN "*.h"
+      PATTERN "copts" EXCLUDE
+      PATTERN "testdata" EXCLUDE
+    )
+
+  # Rewrite options.h to use the compiled ABI.
+  file(READ "absl/base/options.h" ABSL_INTERNAL_OPTIONS_H_CONTENTS)
+
+  # Handle features that require at least C++20.
+  if (ABSL_INTERNAL_AT_LEAST_CXX20)
+    foreach(FEATURE "ORDERING")
+      string(REPLACE
+        "#define ABSL_OPTION_USE_STD_${FEATURE} 2"
+        "#define ABSL_OPTION_USE_STD_${FEATURE} 1"
+        ABSL_INTERNAL_OPTIONS_H_PINNED
+        "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}")
+      set(ABSL_INTERNAL_OPTIONS_H_CONTENTS "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
+    endforeach()
+  endif()
+
+  # Handle features that require at least C++17.
+  if (ABSL_INTERNAL_AT_LEAST_CXX17)
+    foreach(FEATURE "ANY" "OPTIONAL" "STRING_VIEW" "VARIANT")
+      string(REPLACE
+        "#define ABSL_OPTION_USE_STD_${FEATURE} 2"
+        "#define ABSL_OPTION_USE_STD_${FEATURE} 1"
+        ABSL_INTERNAL_OPTIONS_H_PINNED
+        "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}")
+      set(ABSL_INTERNAL_OPTIONS_H_CONTENTS "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
+    endforeach()
+  endif()
+
+  # Any feature that still has the value of 2 (because it was not handled above)
+  # should be set to 0.
+  string(REGEX REPLACE
+    "#define ABSL_OPTION_USE_STD_([^ ]*) 2"
+    "#define ABSL_OPTION_USE_STD_\\1 0"
+    ABSL_INTERNAL_OPTIONS_H_PINNED
+    "${ABSL_INTERNAL_OPTIONS_H_CONTENTS}")
+
+  file(WRITE "${CMAKE_BINARY_DIR}/options-pinned.h" "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
+
+  install(FILES "${CMAKE_BINARY_DIR}/options-pinned.h"
+    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/absl/base
+    RENAME "options.h")
+
+endif()  # ABSL_ENABLE_INSTALL
diff --git a/third_party/absl/CONTRIBUTING.md b/third_party/absl/CONTRIBUTING.md
new file mode 100644
index 000000000..a87254c50
--- /dev/null
+++ b/third_party/absl/CONTRIBUTING.md
@@ -0,0 +1,141 @@
+# How to Contribute to Abseil
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+NOTE: If you are new to GitHub, please start by reading [Pull Request
+howto](https://help.github.com/articles/about-pull-requests/)
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to https://cla.developers.google.com/ to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Contribution Guidelines
+
+Potential contributors sometimes ask us if the Abseil project is the appropriate
+home for their utility library code or for specific functions implementing
+missing portions of the standard. Often, the answer to this question is "no".
+We’d like to articulate our thinking on this issue so that our choices can be
+understood by everyone and so that contributors can have a better intuition
+about whether Abseil might be interested in adopting a new library.
+
+### Priorities
+
+Although our mission is to augment the C++ standard library, our goal is not to
+provide a full forward-compatible implementation of the latest standard. For us
+to consider a library for inclusion in Abseil, it is not enough that a library
+is useful. We generally choose to release a library when it meets at least one
+of the following criteria:
+
+*   **Widespread usage** - Using our internal codebase to help gauge usage, most
+    of the libraries we've released have tens of thousands of users.
+*   **Anticipated widespread usage** - Pre-adoption of some standard-compliant
+    APIs may not have broad adoption initially but can be expected to pick up
+    usage when it replaces legacy APIs.
`absl::from_chars`, for example, + replaces existing code that converts strings to numbers and will therefore + likely see usage growth. +* **High impact** - APIs that provide a key solution to a specific problem, + such as `absl::FixedArray`, have higher impact than usage numbers may signal + and are released because of their importance. +* **Direct support for a library that falls under one of the above** - When we + want access to a smaller library as an implementation detail for a + higher-priority library we plan to release, we may release it, as we did + with portions of `absl/meta/type_traits.h`. One consequence of this is that + the presence of a library in Abseil does not necessarily mean that other + similar libraries would be a high priority. + +### API Freeze Consequences + +Via the +[Abseil Compatibility Guidelines](https://abseil.io/about/compatibility), we +have promised a large degree of API stability. In particular, we will not make +backward-incompatible changes to released APIs without also shipping a tool or +process that can upgrade our users' code. We are not yet at the point of easily +releasing such tools. Therefore, at this time, shipping a library establishes an +API contract which is borderline unchangeable. (We can add new functionality, +but we cannot easily change existing behavior.) This constraint forces us to +very carefully review all APIs that we ship. + + +## Coding Style + +To keep the source consistent, readable, diffable and easy to merge, we use a +fairly rigid coding style, as defined by the +[google-styleguide](https://github.com/google/styleguide) project. All patches +will be expected to conform to the style outlined +[here](https://google.github.io/styleguide/cppguide.html). + +## Guidelines for Pull Requests + +* If you are a Googler, it is required that you send us a Piper CL instead of + using the GitHub pull-request process. The code propagation process will + deliver the change to GitHub. + +* Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often receive PRs that are trying to fix several things at a + time, but if only one fix is considered acceptable, nothing gets merged and + both author's & review's time is wasted. Create more PRs to address + different concerns and everyone will be happy. + +* For speculative changes, consider opening an [Abseil + issue](https://github.com/abseil/abseil-cpp/issues) and discussing it first. + If you are suggesting a behavioral or API change, consider starting with an + [Abseil proposal template](ABSEIL_ISSUE_TEMPLATE.md). + +* Provide a good **PR description** as a record of **what** change is being + made and **why** it was made. Link to a GitHub issue if it exists. + +* Don't fix code style and formatting unless you are already changing that + line to address an issue. Formatting of modified lines may be done using + `git clang-format`. PRs with irrelevant changes won't be merged. If + you do want to fix formatting or style, do that in a separate PR. + +* Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 + weeks of inactivity. + +* Maintain **clean commit history** and use **meaningful commit messages**. + PRs with messy commit history are difficult to review and won't be merged. 
+    Use `rebase -i upstream/master` to curate your commit history and/or to
+    bring in latest changes from master (but avoid rebasing in the middle of a
+    code review).
+
+*   Keep your PR up to date with upstream/master (if there are merge conflicts,
+    we can't really merge your change).
+
+*   **All tests need to be passing** before your change can be merged. We
+    recommend you **run tests locally** (see below).
+
+*   Exceptions to the rules can be made if there's a compelling reason for doing
+    so. That is - the rules are here to serve us, not the other way around, and
+    the rules need to be serving their intended purpose to be valuable.
+
+*   All submissions, including submissions by project members, require review.
+
+## Running Tests
+
+If you have [Bazel](https://bazel.build/) installed, use `bazel test
+--test_tag_filters="-benchmark" ...` to run the unit tests.
+
+If you are running the Linux operating system and have
+[Docker](https://www.docker.com/) installed, you can also run the `linux_*.sh`
+scripts under the [`ci/`](https://github.com/abseil/abseil-cpp/tree/master/ci)
+directory to test Abseil under a variety of conditions.
+
+## Abseil Committers
+
+The current members of the Abseil engineering team are the only committers at
+present.
+
+## Release Process
+
+Abseil lives at head, where latest-and-greatest code can be found.
diff --git a/third_party/absl/FAQ.md b/third_party/absl/FAQ.md
new file mode 100644
index 000000000..fbd92ce97
--- /dev/null
+++ b/third_party/absl/FAQ.md
@@ -0,0 +1,167 @@
+# Abseil FAQ
+
+## Is Abseil the right home for my utility library?
+
+Most often the answer to the question is "no." As both the [About
+Abseil](https://abseil.io/about/) page and our [contributing
+guidelines](https://github.com/abseil/abseil-cpp/blob/master/CONTRIBUTING.md#contribution-guidelines)
+explain, Abseil contains a variety of core C++ library code that is widely used
+at [Google](https://www.google.com/). As such, Abseil's primary purpose is to be
+used as a dependency by Google's open source C++ projects. While we do hope that
+Abseil is also useful to the C++ community at large, this added constraint also
+means that we are unlikely to accept a contribution of utility code that isn't
+already widely used by Google.
+
+## How do I set the C++ dialect used to build Abseil?
+
+The short answer is that whatever mechanism you choose, you need to make sure
+that you set this option consistently at the global level for your entire
+project. If, for example, you want to set the C++ dialect to C++17, with
+[Bazel](https://bazel.build/) as the build system and `gcc` or `clang` as the
+compiler, there are several ways to do this:
+* Pass `--cxxopt=-std=c++17` on the command line (for example, `bazel build
+  --cxxopt=-std=c++17 ...`)
+* Set the environment variable `BAZEL_CXXOPTS` (for example,
+  `BAZEL_CXXOPTS=-std=c++17`)
+* Add `build --cxxopt=-std=c++17` to your [`.bazelrc`
+  file](https://docs.bazel.build/versions/master/guide.html#bazelrc)
+
+If you are using CMake as the build system, you'll need to add a line like
+`set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. If you
+are developing a library designed to be used by other clients, you should
+instead leave `CMAKE_CXX_STANDARD` unset and configure the minimum C++ standard
+required by each of your library targets via `target_compile_features`. See the
+[CMake build
+instructions](https://github.com/abseil/abseil-cpp/blob/master/CMake/README.md)
+for more information.
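+
+As a minimal sketch (the project name, target, and `abseil-cpp` checkout path
+below are illustrative assumptions, not taken from Abseil's docs), a top-level
+`CMakeLists.txt` that pins the dialect for everything it builds, including a
+vendored Abseil, could look like this:
+
+```
+cmake_minimum_required(VERSION 3.10)
+project(my_top_level_project CXX)  # hypothetical project name
+
+# Set the dialect once, globally, before adding any subprojects so that
+# Abseil and your own code are compiled with the same -std= flag.
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+add_subdirectory(abseil-cpp)  # illustrative path to a vendored Abseil checkout
+
+add_executable(my_app main.cc)  # hypothetical target and source file
+target_link_libraries(my_app absl::strings)
+```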
+ +For a longer answer to this question and to understand why some other approaches +don't work, see the answer to ["What is ABI and why don't you recommend using a +pre-compiled version of +Abseil?"](#what-is-abi-and-why-dont-you-recommend-using-a-pre-compiled-version-of-abseil) + +## What is ABI and why don't you recommend using a pre-compiled version of Abseil? + +For the purposes of this discussion, you can think of +[ABI](https://en.wikipedia.org/wiki/Application_binary_interface) as the +compiled representation of the interfaces in code. This is in contrast to +[API](https://en.wikipedia.org/wiki/Application_programming_interface), which +you can think of as the interfaces as defined by the code itself. [Abseil has a +strong promise of API compatibility, but does not make any promise of ABI +compatibility](https://abseil.io/about/compatibility). Let's take a look at what +this means in practice. + +You might be tempted to do something like this in a +[Bazel](https://bazel.build/) `BUILD` file: + +``` +# DON'T DO THIS!!! +cc_library( + name = "my_library", + srcs = ["my_library.cc"], + copts = ["-std=c++17"], # May create a mixed-mode compile! + deps = ["@com_google_absl//absl/strings"], +) +``` + +Applying `-std=c++17` to an individual target in your `BUILD` file is going to +compile that specific target in C++17 mode, but it isn't going to ensure the +Abseil library is built in C++17 mode, since the Abseil library itself is a +different build target. If your code includes an Abseil header, then your +program may contain conflicting definitions of the same +class/function/variable/enum, etc. As a rule, all compile options that affect +the ABI of a program need to be applied to the entire build on a global basis. + +C++ has something called the [One Definition +Rule](https://en.wikipedia.org/wiki/One_Definition_Rule) (ODR). C++ doesn't +allow multiple definitions of the same class/function/variable/enum, etc. ODR +violations sometimes result in linker errors, but linkers do not always catch +violations. Uncaught ODR violations can result in strange runtime behaviors or +crashes that can be hard to debug. + +If you build the Abseil library and your code using different compile options +that affect ABI, there is a good chance you will run afoul of the One Definition +Rule. Examples of GCC compile options that affect ABI include (but aren't +limited to) language dialect (e.g. `-std=`), optimization level (e.g. `-O2`), +code generation flags (e.g. `-fexceptions`), and preprocessor defines +(e.g. `-DNDEBUG`). + +If you use a pre-compiled version of Abseil, (for example, from your Linux +distribution package manager or from something like +[vcpkg](https://github.com/microsoft/vcpkg)) you have to be very careful to +ensure ABI compatibility across the components of your program. The only way you +can be sure your program is going to be correct regarding ABI is to ensure +you've used the exact same compile options as were used to build the +pre-compiled library. This does not mean that Abseil cannot work as part of a +Linux distribution since a knowledgeable binary packager will have ensured that +all packages have been built with consistent compile options. This is one of the +reasons we warn against - though do not outright reject - using Abseil as a +pre-compiled library. + +Another possible way that you might afoul of ABI issues is if you accidentally +include two versions of Abseil in your program. 
Multiple versions of Abseil can +end up within the same binary if your program uses the Abseil library and +another library also transitively depends on Abseil (resulting in what is +sometimes called the diamond dependency problem). In cases such as this you must +structure your build so that all libraries use the same version of Abseil. +[Abseil's strong promise of API compatibility between +releases](https://abseil.io/about/compatibility) means the latest "HEAD" release +of Abseil is almost certainly the right choice if you are doing as we recommend +and building all of your code from source. + +For these reasons we recommend you avoid pre-compiled code and build the Abseil +library yourself in a consistent manner with the rest of your code. + +## What is "live at head" and how do I do it? + +From Abseil's point-of-view, "live at head" means that every Abseil source +release (which happens on an almost daily basis) is either API compatible with +the previous release, or comes with an automated tool that you can run over code +to make it compatible. In practice, the need to use an automated tool is +extremely rare. This means that upgrading from one source release to another +should be a routine practice that can and should be performed often. + +We recommend you update to the [latest commit in the `master` branch of +Abseil](https://github.com/abseil/abseil-cpp/commits/master) as often as +possible. Not only will you pick up bug fixes more quickly, but if you have good +automated testing, you will catch and be able to fix any [Hyrum's +Law](https://www.hyrumslaw.com/) dependency problems on an incremental basis +instead of being overwhelmed by them and having difficulty isolating them if you +wait longer between updates. + +If you are using the [Bazel](https://bazel.build/) build system and its +[external dependencies](https://docs.bazel.build/versions/master/external.html) +feature, updating the +[`http_archive`](https://docs.bazel.build/versions/master/repo/http.html#http_archive) +rule in your +[`WORKSPACE`](https://docs.bazel.build/versions/master/be/workspace.html) for +`com_google_abseil` to point to the [latest commit in the `master` branch of +Abseil](https://github.com/abseil/abseil-cpp/commits/master) is all you need to +do. For example, on February 11, 2020, the latest commit to the master branch +was `98eb410c93ad059f9bba1bf43f5bb916fc92a5ea`. To update to this commit, you +would add the following snippet to your `WORKSPACE` file: + +``` +http_archive( + name = "com_google_absl", + urls = ["https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip"], # 2020-02-11T18:50:53Z + strip_prefix = "abseil-cpp-98eb410c93ad059f9bba1bf43f5bb916fc92a5ea", + sha256 = "aabf6c57e3834f8dc3873a927f37eaf69975d4b28117fc7427dfb1c661542a87", +) +``` + +To get the `sha256` of this URL, run `curl -sL --output - +https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip +| sha256sum -`. + +You can commit the updated `WORKSPACE` file to your source control every time +you update, and if you have good automated testing, you might even consider +automating this. + +One thing we don't recommend is using GitHub's `master.zip` files (for example +[https://github.com/abseil/abseil-cpp/archive/master.zip](https://github.com/abseil/abseil-cpp/archive/master.zip)), +which are always the latest commit in the `master` branch, to implement live at +head. Since these `master.zip` URLs are not versioned, you will lose build +reproducibility. 
In addition, some build systems, including Bazel, will simply +cache this file, which means you won't actually be updating to the latest +release until your cache is cleared or invalidated. diff --git a/third_party/absl/LICENSE b/third_party/absl/LICENSE new file mode 100644 index 000000000..ccd61dcfe --- /dev/null +++ b/third_party/absl/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/third_party/absl/MODULE.bazel b/third_party/absl/MODULE.bazel
new file mode 100644
index 000000000..9d60905a3
--- /dev/null
+++ b/third_party/absl/MODULE.bazel
@@ -0,0 +1,39 @@
+# Copyright 2024 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# https://bazel.build/external/overview#bzlmod
+
+module(
+    name = "abseil-cpp",
+    version = "20240116.2",
+    compatibility_level = 1,
+)
+
+# Only direct dependencies need to be listed below.
+# Please keep the versions in sync with the versions in the WORKSPACE file.
+
+bazel_dep(name = "bazel_skylib",
+          version = "1.5.0")
+
+bazel_dep(name = "google_benchmark",
+          version = "1.8.3",
+          repo_name = "com_github_google_benchmark",
+          dev_dependency = True)
+
+bazel_dep(name = "googletest",
+          version = "1.14.0.bcr.1",
+          repo_name = "com_google_googletest")
+
+bazel_dep(name = "platforms",
+          version = "0.0.8")
diff --git a/third_party/absl/PrivacyInfo.xcprivacy b/third_party/absl/PrivacyInfo.xcprivacy
new file mode 100644
index 000000000..6af16412a
--- /dev/null
+++ b/third_party/absl/PrivacyInfo.xcprivacy
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>NSPrivacyTracking</key>
+	<false/>
+	<key>NSPrivacyCollectedDataTypes</key>
+	<array/>
+	<key>NSPrivacyTrackingDomains</key>
+	<array/>
+	<key>NSPrivacyAccessedAPITypes</key>
+	<array/>
+</dict>
+</plist>
diff --git a/third_party/absl/README.md b/third_party/absl/README.md
new file mode 100644
index 000000000..f834fcdec
--- /dev/null
+++ b/third_party/absl/README.md
@@ -0,0 +1,160 @@
+# Abseil - C++ Common Libraries
+
+The repository contains the Abseil C++ library code. Abseil is an open-source
+collection of C++ code (compliant with C++14) designed to augment the C++
+standard library.
+
+## Table of Contents
+
+- [About Abseil](#about)
+- [Quickstart](#quickstart)
+- [Building Abseil](#build)
+- [Support](#support)
+- [Codemap](#codemap)
+- [Releases](#releases)
+- [License](#license)
+- [Links](#links)
+
+<a name="about"></a>
+## About Abseil
+
+Abseil is an open-source collection of C++ library code designed to augment
+the C++ standard library. The Abseil library code is collected from Google's
+own C++ code base, has been extensively tested and used in production, and
+is the same code we depend on in our daily coding lives.
+
+In some cases, Abseil provides pieces missing from the C++ standard; in
+others, Abseil provides alternatives to the standard for special needs
+we've found through usage in the Google code base. We denote those cases
+clearly within the library code we provide you.
+
+Abseil is not meant to be a competitor to the standard library; we've
+just found that many of these utilities serve a purpose within our code
+base, and we now want to provide those resources to the C++ community as
+a whole.
+
+<a name="quickstart"></a>
+## Quickstart
+
+If you want to just get started, make sure you at least run through the
+[Abseil Quickstart](https://abseil.io/docs/cpp/quickstart). The Quickstart
+contains information about setting up your development environment, downloading
+the Abseil code, running tests, and getting a simple binary working.
+
+<a name="build"></a>
+## Building Abseil
+
+[Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official
+build systems for Abseil.
+See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information
+on building Abseil using the Bazel build system.
+If you require CMake support, please check the [CMake build
+instructions](CMake/README.md) and [CMake
+Quickstart](https://abseil.io/docs/cpp/quickstart-cmake).
+
+<a name="support"></a>
+## Support
+
+Abseil follows Google's [Foundational C++ Support
+Policy](https://opensource.google/documentation/policies/cplusplus-support). See
+[this
+table](https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md)
+for a list of currently supported versions of compilers, platforms, and build
+tools.
+
+<a name="codemap"></a>
+## Codemap
+
+Abseil contains the following C++ library components:
+
+* [`base`](absl/base/)
The `base` library contains initialization code and other code which + all other Abseil code depends on. Code within `base` may not depend on any + other code (other than the C++ standard library). +* [`algorithm`](absl/algorithm/) +
The `algorithm` library contains additions to the C++ `<algorithm>`
+  library and container-based versions of such algorithms.
+* [`cleanup`](absl/cleanup/)
The `cleanup` library contains the control-flow-construct-like type + `absl::Cleanup` which is used for executing a callback on scope exit. +* [`container`](absl/container/) +
The `container` library contains additional STL-style containers, + including Abseil's unordered "Swiss table" containers. +* [`crc`](absl/crc/) The `crc` library contains code for + computing error-detecting cyclic redundancy checks on data. +* [`debugging`](absl/debugging/) +
The `debugging` library contains code useful for enabling leak + checks, and stacktrace and symbolization utilities. +* [`flags`](absl/flags/) +
The `flags` library contains code for handling command line flags for + libraries and binaries built with Abseil. +* [`hash`](absl/hash/) +
The `hash` library contains the hashing framework and default hash + functor implementations for hashable types in Abseil. +* [`log`](absl/log/) +
The `log` library contains `LOG` and `CHECK` macros and facilities + for writing logged messages out to disk, `stderr`, or user-extensible + destinations. +* [`memory`](absl/memory/) +
The `memory` library contains memory management facilities that augment
+  C++'s `<memory>` library.
+* [`meta`](absl/meta/)
The `meta` library contains compatible versions of type checks
+  available within C++14 and C++17 versions of the C++ `<type_traits>` library.
+* [`numeric`](absl/numeric/)
The `numeric` library contains 128-bit integer types as well as + implementations of C++20's bitwise math functions. +* [`profiling`](absl/profiling/) +
The `profiling` library contains utility code for profiling C++ + entities. It is currently a private dependency of other Abseil libraries. +* [`random`](absl/random/) +
The `random` library contains functions for generating pseudorandom
+  values.
+* [`status`](absl/status/)
The `status` library contains abstractions for error handling, + specifically `absl::Status` and `absl::StatusOr`. +* [`strings`](absl/strings/) +
The `strings` library contains a variety of string routines and
+  utilities, including a C++14-compatible version of the C++17
+  `std::string_view` type.
+* [`synchronization`](absl/synchronization/)
The `synchronization` library contains concurrency primitives (Abseil's + `absl::Mutex` class, an alternative to `std::mutex`) and a variety of + synchronization abstractions. +* [`time`](absl/time/) +
The `time` library contains abstractions for computing with absolute + points in time, durations of time, and formatting and parsing time within + time zones. +* [`types`](absl/types/) +
The `types` library contains non-container utility types, like a + C++14-compatible version of the C++17 `std::optional` type. +* [`utility`](absl/utility/) +
The `utility` library contains utility and helper code.
+
+<a name="releases"></a>
+## Releases
+
+Abseil recommends users "live-at-head" (update to the latest commit from the
+master branch as often as possible). However, we realize this philosophy doesn't
+work for every project, so we also provide [Long Term Support
+Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport
+fixes for severe bugs. See our [release
+management](https://abseil.io/about/releases) document for more details.
+
+<a name="license"></a>
+## License
+
+The Abseil C++ library is licensed under the terms of the Apache
+license. See [LICENSE](LICENSE) for more information.
+
+<a name="links"></a>
+## Links
+
+For more information about Abseil:
+
+* Consult our [Abseil Introduction](https://abseil.io/about/intro)
+* Read [Why Adopt Abseil](https://abseil.io/about/philosophy) to understand our
+  design philosophy.
+* Peruse our
+  [Abseil Compatibility Guarantees](https://abseil.io/about/compatibility) to
+  understand both what we promise to you, and what we expect of you in return.
diff --git a/third_party/absl/UPGRADES.md b/third_party/absl/UPGRADES.md
new file mode 100644
index 000000000..3cac141d0
--- /dev/null
+++ b/third_party/absl/UPGRADES.md
@@ -0,0 +1,17 @@
+# C++ Upgrade Tools
+
+Abseil may occasionally release API-breaking changes. As noted in our
+[Compatibility Guidelines][compatibility-guide], we will aim to provide a tool
+to do the work of effecting such API-breaking changes, when absolutely
+necessary.
+
+These tools will be listed on the [C++ Upgrade Tools][upgrade-tools] guide on
+https://abseil.io.
+
+For more information, the [C++ Automated Upgrade Guide][api-upgrades-guide]
+outlines this process.
+
+[compatibility-guide]: https://abseil.io/about/compatibility
+[api-upgrades-guide]: https://abseil.io/docs/cpp/tools/api-upgrades
+[upgrade-tools]: https://abseil.io/docs/cpp/tools/upgrades/
+
diff --git a/third_party/absl/WORKSPACE b/third_party/absl/WORKSPACE
new file mode 100644
index 000000000..0d8860914
--- /dev/null
+++ b/third_party/absl/WORKSPACE
@@ -0,0 +1,59 @@
+#
+# Copyright 2019 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+workspace(name = "com_google_absl")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+# GoogleTest/GoogleMock framework. Used by most unit-tests.
+http_archive(
+    name = "com_google_googletest",
+    sha256 = "8ad598c73ad796e0d8280b082cebd82a630d73e73cd3c70057938a6501bba5d7",
+    strip_prefix = "googletest-1.14.0",
+    # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh and
+    # ci/windows_msvc_cmake.bat.
+    urls = ["https://github.com/google/googletest/archive/refs/tags/v1.14.0.tar.gz"],
+)
+
+# RE2 (the regular expression library used by GoogleTest)
+http_archive(
+    name = "com_googlesource_code_re2",
+    sha256 = "828341ad08524618a626167bd320b0c2acc97bd1c28eff693a9ea33a7ed2a85f",
+    strip_prefix = "re2-2023-11-01",
+    urls = ["https://github.com/google/re2/releases/download/2023-11-01/re2-2023-11-01.zip"],
+)
+
+# Google benchmark.
+http_archive( + name = "com_github_google_benchmark", + sha256 = "6bc180a57d23d4d9515519f92b0c83d61b05b5bab188961f36ac7b06b0d9e9ce", + strip_prefix = "benchmark-1.8.3", + urls = ["https://github.com/google/benchmark/archive/refs/tags/v1.8.3.tar.gz"], +) + +# Bazel Skylib. +http_archive( + name = "bazel_skylib", + sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94", + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz"], +) + +# Bazel platform rules. +http_archive( + name = "platforms", + sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74", + urls = ["https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz"], +) diff --git a/third_party/absl/absl/BUILD.bazel b/third_party/absl/absl/BUILD.bazel new file mode 100644 index 000000000..14c30b38c --- /dev/null +++ b/third_party/absl/absl/BUILD.bazel @@ -0,0 +1,162 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +load("@bazel_skylib//lib:selects.bzl", "selects") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +config_setting( + name = "clang_compiler", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "clang", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "gcc_compiler", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "gcc", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "mingw_unspecified_compiler", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "mingw", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "mingw-gcc_compiler", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "mingw-gcc", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "msvc_compiler", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "msvc-cl", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "clang-cl_compiler", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "clang-cl", + }, + visibility = [":__subpackages__"], +) + +# x64_windows-clang-cl - used for selecting clang-cl for CI builds +platform( + name = "x64_windows-clang-cl", + constraint_values = [ + "@platforms//cpu:x86_64", + "@platforms//os:windows", + "@bazel_tools//tools/cpp:clang-cl", + ], + visibility = [":__subpackages__"], +) + +config_setting( + name = "osx", + constraint_values = [ + "@platforms//os:osx", + ], +) + +config_setting( + name = "ios", + constraint_values = [ + "@platforms//os:ios", + ], +) + +config_setting( + name = "ppc", + values = { + "cpu": "ppc", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "cpu_wasm", + values = { + "cpu": "wasm", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "cpu_wasm32", + values = { + "cpu": "wasm32", + }, + visibility = [":__subpackages__"], +) + +config_setting( + name = "platforms_wasm32", + constraint_values = [ + "@platforms//cpu:wasm32", + ], + 
visibility = [":__subpackages__"], +) + +config_setting( + name = "platforms_wasm64", + constraint_values = [ + "@platforms//cpu:wasm64", + ], + visibility = [":__subpackages__"], +) + +selects.config_setting_group( + name = "wasm", + match_any = [ + ":cpu_wasm", + ":cpu_wasm32", + ":platforms_wasm32", + ":platforms_wasm64", + ], + visibility = [":__subpackages__"], +) + +config_setting( + name = "fuchsia", + values = { + "cpu": "fuchsia", + }, + visibility = [":__subpackages__"], +) + +selects.config_setting_group( + name = "mingw_compiler", + match_any = [ + ":mingw_unspecified_compiler", + ":mingw-gcc_compiler", + ], + visibility = [":__subpackages__"], +) diff --git a/third_party/absl/absl/CMakeLists.txt b/third_party/absl/absl/CMakeLists.txt new file mode 100644 index 000000000..3a7c12fe9 --- /dev/null +++ b/third_party/absl/absl/CMakeLists.txt @@ -0,0 +1,44 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +add_subdirectory(base) +add_subdirectory(algorithm) +add_subdirectory(cleanup) +add_subdirectory(container) +add_subdirectory(crc) +add_subdirectory(debugging) +add_subdirectory(flags) +add_subdirectory(functional) +add_subdirectory(hash) +add_subdirectory(log) +add_subdirectory(memory) +add_subdirectory(meta) +add_subdirectory(numeric) +add_subdirectory(profiling) +add_subdirectory(random) +add_subdirectory(status) +add_subdirectory(strings) +add_subdirectory(synchronization) +add_subdirectory(time) +add_subdirectory(types) +add_subdirectory(utility) + +if (${ABSL_BUILD_DLL}) + absl_make_dll() + if (${ABSL_BUILD_TEST_HELPERS}) + absl_make_dll(TEST ON) + endif() +endif() diff --git a/third_party/absl/absl/abseil.podspec.gen.py b/third_party/absl/absl/abseil.podspec.gen.py new file mode 100755 index 000000000..cbf7cb423 --- /dev/null +++ b/third_party/absl/absl/abseil.podspec.gen.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""This script generates abseil.podspec from all BUILD.bazel files. + +This is expected to run on abseil git repository with Bazel 1.0 on Linux. +It recursively analyzes BUILD.bazel files using query command of Bazel to +dump its build rules in XML format. From these rules, it constructs podspec +structure. +""" + +import argparse +import collections +import os +import re +import subprocess +import xml.etree.ElementTree + +# Template of root podspec. +SPEC_TEMPLATE = """ +# This file has been automatically generated from a script. +# Please make modifications to `abseil.podspec.gen.py` instead. 
+Pod::Spec.new do |s|
+  s.name     = 'abseil'
+  s.version  = '${version}'
+  s.summary  = 'Abseil Common Libraries (C++) from Google'
+  s.homepage = 'https://abseil.io'
+  s.license  = 'Apache License, Version 2.0'
+  s.authors  = { 'Abseil Team' => 'abseil-io@googlegroups.com' }
+  s.source = {
+    :git => 'https://github.com/abseil/abseil-cpp.git',
+    :tag => '${tag}',
+  }
+  s.resource_bundles = {
+    s.module_name => 'PrivacyInfo.xcprivacy',
+  }
+  s.module_name = 'absl'
+  s.header_mappings_dir = 'absl'
+  s.header_dir = 'absl'
+  s.libraries = 'c++'
+  s.compiler_flags = '-Wno-everything'
+  s.pod_target_xcconfig = {
+    'USER_HEADER_SEARCH_PATHS' => '$(inherited) "$(PODS_TARGET_SRCROOT)"',
+    'USE_HEADERMAP' => 'NO',
+    'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+  }
+  s.ios.deployment_target = '9.0'
+  s.osx.deployment_target = '10.11'
+  s.tvos.deployment_target = '9.0'
+  s.watchos.deployment_target = '2.0'
+  s.subspec 'xcprivacy' do |ss|
+    ss.resource_bundles = {
+      ss.module_name => 'PrivacyInfo.xcprivacy',
+    }
+  end
+"""
+
+# Rule object representing the rule of Bazel BUILD.
+Rule = collections.namedtuple(
+    "Rule", "type name package srcs hdrs textual_hdrs deps visibility testonly")
+
+
+def get_elem_value(elem, name):
+  """Returns the value of XML element with the given name."""
+  for child in elem:
+    if child.attrib.get("name") != name:
+      continue
+    if child.tag == "string":
+      return child.attrib.get("value")
+    if child.tag == "boolean":
+      return child.attrib.get("value") == "true"
+    if child.tag == "list":
+      return [nested_child.attrib.get("value") for nested_child in child]
+    # Raise a real exception type; raising a plain string is a TypeError in
+    # Python 3.
+    raise ValueError("Cannot recognize tag: " + child.tag)
+  return None
+
+
+def normalize_paths(paths):
+  """Returns the list of normalized path."""
+  # e.g. ["//absl/strings:dir/header.h"] -> ["absl/strings/dir/header.h"]
+  return [path.lstrip("/").replace(":", "/") for path in paths]
+
+
+def parse_rule(elem, package):
+  """Returns a rule from bazel XML rule."""
+  return Rule(
+      type=elem.attrib["class"],
+      name=get_elem_value(elem, "name"),
+      package=package,
+      srcs=normalize_paths(get_elem_value(elem, "srcs") or []),
+      hdrs=normalize_paths(get_elem_value(elem, "hdrs") or []),
+      textual_hdrs=normalize_paths(get_elem_value(elem, "textual_hdrs") or []),
+      deps=get_elem_value(elem, "deps") or [],
+      visibility=get_elem_value(elem, "visibility") or [],
+      testonly=get_elem_value(elem, "testonly") or False)
+
+
+def read_build(package):
+  """Runs bazel query on given package file and returns all cc rules."""
+  result = subprocess.check_output(
+      ["bazel", "query", package + ":all", "--output", "xml"])
+  root = xml.etree.ElementTree.fromstring(result)
+  return [
+      parse_rule(elem, package)
+      for elem in root
+      if elem.tag == "rule" and elem.attrib["class"].startswith("cc_")
+  ]
+
+
+def collect_rules(root_path):
+  """Collects and returns all rules from root path recursively."""
+  rules = []
+  for cur, _, _ in os.walk(root_path):
+    build_path = os.path.join(cur, "BUILD.bazel")
+    if os.path.exists(build_path):
+      rules.extend(read_build("//" + cur))
+  return rules
+
+
+def relevant_rule(rule):
+  """Returns true if a given rule is relevant when generating a podspec."""
+  return (
+      # cc_library only (ignore cc_test, cc_binary)
+      rule.type == "cc_library" and
+      # ignore empty rule
+      (rule.hdrs + rule.textual_hdrs + rule.srcs) and
+      # ignore test-only rule
+      not rule.testonly)
+
+
+def get_spec_var(depth):
+  """Returns the name of variable for spec with given depth."""
+  return "s" if depth == 0 else "s{}".format(depth)
+
+
+def get_spec_name(label):
+  """Converts
the label of bazel rule to the name of podspec.""" + assert label.startswith("//absl/"), "{} doesn't start with //absl/".format( + label) + # e.g. //absl/apple/banana -> abseil/apple/banana + return "abseil/" + label[7:] + + +def write_podspec(f, rules, args): + """Writes a podspec from given rules and args.""" + rule_dir = build_rule_directory(rules)["abseil"] + # Write root part with given arguments + spec = re.sub(r"\$\{(\w+)\}", lambda x: args[x.group(1)], + SPEC_TEMPLATE).lstrip() + f.write(spec) + # Write all target rules + write_podspec_map(f, rule_dir, 0) + f.write("end\n") + + +def build_rule_directory(rules): + """Builds a tree-style rule directory from given rules.""" + rule_dir = {} + for rule in rules: + cur = rule_dir + for frag in get_spec_name(rule.package).split("/"): + cur = cur.setdefault(frag, {}) + cur[rule.name] = rule + return rule_dir + + +def write_podspec_map(f, cur_map, depth): + """Writes podspec from rule map recursively.""" + for key, value in sorted(cur_map.items()): + indent = " " * (depth + 1) + f.write("{indent}{var0}.subspec '{key}' do |{var1}|\n".format( + indent=indent, + key=key, + var0=get_spec_var(depth), + var1=get_spec_var(depth + 1))) + if isinstance(value, dict): + write_podspec_map(f, value, depth + 1) + else: + write_podspec_rule(f, value, depth + 1) + f.write("{indent}end\n".format(indent=indent)) + + +def write_podspec_rule(f, rule, depth): + """Writes podspec from given rule.""" + indent = " " * (depth + 1) + spec_var = get_spec_var(depth) + # Puts all files in hdrs, textual_hdrs, and srcs into source_files. + # Since CocoaPods treats header_files a bit differently from bazel, + # this won't generate a header_files field so that all source_files + # are considered as header files. + srcs = sorted(set(rule.hdrs + rule.textual_hdrs + rule.srcs)) + write_indented_list( + f, "{indent}{var}.source_files = ".format(indent=indent, var=spec_var), + srcs) + # Writes dependencies of this rule. 
+ for dep in sorted(rule.deps): + name = get_spec_name(dep.replace(":", "/")) + f.write("{indent}{var}.dependency '{dep}'\n".format( + indent=indent, var=spec_var, dep=name)) + # Writes dependency to xcprivacy + f.write( + "{indent}{var}.dependency '{dep}'\n".format( + indent=indent, var=spec_var, dep="abseil/xcprivacy" + ) + ) + + +def write_indented_list(f, leading, values): + """Writes leading values in an indented style.""" + f.write(leading) + f.write((",\n" + " " * len(leading)).join("'{}'".format(v) for v in values)) + f.write("\n") + + +def generate(args): + """Generates a podspec file from all BUILD files under absl directory.""" + rules = filter(relevant_rule, collect_rules("absl")) + with open(args.output, "wt") as f: + write_podspec(f, rules, vars(args)) + + +def main(): + parser = argparse.ArgumentParser( + description="Generates abseil.podspec from BUILD.bazel") + parser.add_argument( + "-v", "--version", help="The version of podspec", required=True) + parser.add_argument( + "-t", + "--tag", + default=None, + help="The name of git tag (default: version)") + parser.add_argument( + "-o", + "--output", + default="abseil.podspec", + help="The name of output file (default: abseil.podspec)") + args = parser.parse_args() + if args.tag is None: + args.tag = args.version + generate(args) + + +if __name__ == "__main__": + main() diff --git a/third_party/absl/absl/algorithm/BUILD.bazel b/third_party/absl/absl/algorithm/BUILD.bazel new file mode 100644 index 000000000..ddf9e11f4 --- /dev/null +++ b/third_party/absl/absl/algorithm/BUILD.bazel @@ -0,0 +1,88 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) + +licenses(["notice"]) + +cc_library( + name = "algorithm", + hdrs = ["algorithm.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], +) + +cc_test( + name = "algorithm_test", + size = "small", + srcs = ["algorithm_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":algorithm", + "//absl/base:config", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "container", + hdrs = [ + "container.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":algorithm", + "//absl/base:core_headers", + "//absl/base:nullability", + "//absl/meta:type_traits", + ], +) + +cc_test( + name = "container_test", + srcs = ["container_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container", + "//absl/base", + "//absl/base:core_headers", + "//absl/memory", + "//absl/types:span", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/third_party/absl/absl/algorithm/CMakeLists.txt b/third_party/absl/absl/algorithm/CMakeLists.txt new file mode 100644 index 000000000..5577164dc --- /dev/null +++ b/third_party/absl/absl/algorithm/CMakeLists.txt @@ -0,0 +1,71 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +absl_cc_library( + NAME + algorithm + HDRS + "algorithm.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_test( + NAME + algorithm_test + SRCS + "algorithm_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::algorithm + absl::config + GTest::gmock_main +) + +absl_cc_library( + NAME + algorithm_container + HDRS + "container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::meta + absl::nullability + PUBLIC +) + +absl_cc_test( + NAME + container_test + SRCS + "container_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::algorithm_container + absl::base + absl::core_headers + absl::memory + absl::span + GTest::gmock_main +) diff --git a/third_party/absl/absl/algorithm/algorithm.h b/third_party/absl/absl/algorithm/algorithm.h new file mode 100644 index 000000000..59aeed7d2 --- /dev/null +++ b/third_party/absl/absl/algorithm/algorithm.h @@ -0,0 +1,64 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: algorithm.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains Google extensions to the standard <algorithm> C++
+// header.
+
+#ifndef ABSL_ALGORITHM_ALGORITHM_H_
+#define ABSL_ALGORITHM_ALGORITHM_H_
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// equal()
+// rotate()
+//
+// Historical note: Abseil once provided implementations of these algorithms
+// prior to their adoption in C++14. New code should prefer to use the std
+// variants.
+//
+// See the documentation for the STL <algorithm> header for more information:
+// https://en.cppreference.com/w/cpp/header/algorithm
+using std::equal;
+using std::rotate;
+
+// linear_search()
+//
+// Performs a linear search for `value` using the iterator `first` up to
+// but not including `last`, returning true if [`first`, `last`) contains an
+// element equal to `value`.
+//
+// A linear search is of O(n) complexity which is guaranteed to make at most
+// n = (`last` - `first`) comparisons. A linear search over short containers
+// may be faster than a binary search, even when the container is sorted.
+template <typename InputIterator, typename EqualityComparable>
+bool linear_search(InputIterator first, InputIterator last,
+                   const EqualityComparable& value) {
+  return std::find(first, last, value) != last;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_ALGORITHM_ALGORITHM_H_
diff --git a/third_party/absl/absl/algorithm/algorithm_test.cc b/third_party/absl/absl/algorithm/algorithm_test.cc
new file mode 100644
index 000000000..e6ee46952
--- /dev/null
+++ b/third_party/absl/absl/algorithm/algorithm_test.cc
@@ -0,0 +1,50 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/algorithm/algorithm.h"
+
+#include <algorithm>
+#include <list>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace {
+
+class LinearSearchTest : public testing::Test {
+ protected:
+  LinearSearchTest() : container_{1, 2, 3} {}
+
+  static bool Is3(int n) { return n == 3; }
+  static bool Is4(int n) { return n == 4; }
+
+  std::vector<int> container_;
+};
+
+TEST_F(LinearSearchTest, linear_search) {
+  EXPECT_TRUE(absl::linear_search(container_.begin(), container_.end(), 3));
+  EXPECT_FALSE(absl::linear_search(container_.begin(), container_.end(), 4));
+}
+
+TEST_F(LinearSearchTest, linear_searchConst) {
+  const std::vector<int> *const const_container = &container_;
+  EXPECT_TRUE(
+      absl::linear_search(const_container->begin(), const_container->end(), 3));
+  EXPECT_FALSE(
+      absl::linear_search(const_container->begin(), const_container->end(), 4));
+}
+
+}  // namespace
diff --git a/third_party/absl/absl/algorithm/container.h b/third_party/absl/absl/algorithm/container.h
new file mode 100644
index 000000000..c7bafae14
--- /dev/null
+++ b/third_party/absl/absl/algorithm/container.h
@@ -0,0 +1,1760 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: container.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides Container-based versions of algorithmic functions
+// within the C++ standard library. The following standard library sets of
+// functions are covered within this file:
+//
+// * Algorithmic <algorithm> functions
+// * Algorithmic <numeric> functions
+// * <iterator> functions
+//
+// The standard library functions operate on iterator ranges; the functions
+// within this API operate on containers, though many return iterator ranges.
+//
+// All functions within this API are named with a `c_` prefix. Calls such as
+// `absl::c_xx(container, ...)` are equivalent to std:: functions such as
+// `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on
+// iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`)
+// have no equivalent here.
+//
+// For template parameter and variable naming, `C` indicates the container type
+// to which the function is applied, `Pred` indicates the predicate object type
+// to be used by the function and `T` indicates the applicable element type.
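+//
+// For example, given `std::vector<int> v`, the container-based spelling
+//
+//   absl::c_all_of(v, pred)
+//
+// is equivalent to the iterator-range spelling
+//
+//   std::all_of(std::begin(v), std::end(v), pred)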
+
+#ifndef ABSL_ALGORITHM_CONTAINER_H_
+#define ABSL_ALGORITHM_CONTAINER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <numeric>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/macros.h"
+#include "absl/base/nullability.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_algorithm_internal {
+
+// NOTE: it is important to defer to ADL lookup for building with C++ modules,
+// especially for headers like <valarray> which are not visible from this file
+// but specialize std::begin and std::end.
+using std::begin;
+using std::end;
+
+// The type of the iterator given by begin(c) (possibly std::begin(c)).
+// ContainerIter<const vector<T>> gives vector<T>::const_iterator,
+// while ContainerIter<vector<T>> gives vector<T>::iterator.
+template <typename C>
+using ContainerIter = decltype(begin(std::declval<C&>()));
+
+// An MSVC bug involving template parameter substitution requires us to use
+// decltype() here instead of just std::pair.
+template <typename C1, typename C2>
+using ContainerIterPairType =
+    decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+
+template <typename C>
+using ContainerDifferenceType = decltype(std::distance(
+    std::declval<ContainerIter<C>>(), std::declval<ContainerIter<C>>()));
+
+template <typename C>
+using ContainerPointerType =
+    typename std::iterator_traits<ContainerIter<C>>::pointer;
+
+// container_algorithm_internal::c_begin and
+// container_algorithm_internal::c_end are abbreviations for proper ADL
+// lookup of std::begin and std::end, i.e.
+//   using std::begin;
+//   using std::end;
+//   std::foo(begin(c), end(c));
+// becomes
+//   std::foo(container_algorithm_internal::begin(c),
+//            container_algorithm_internal::end(c));
+// These are meant for internal use only.
+
+template <typename C>
+ContainerIter<C> c_begin(C& c) {
+  return begin(c);
+}
+
+template <typename C>
+ContainerIter<C> c_end(C& c) {
+  return end(c);
+}
+
+template <typename T>
+struct IsUnorderedContainer : std::false_type {};
+
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    std::unordered_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<std::unordered_set<Key, Hash, KeyEqual, Allocator>>
+    : std::true_type {};
+
+}  // namespace container_algorithm_internal
+
+// PUBLIC API
+
+//------------------------------------------------------------------------------
+// Abseil algorithm.h functions
+//------------------------------------------------------------------------------
+
+// c_linear_search()
+//
+// Container-based version of absl::linear_search() for performing a linear
+// search within a container.
+template <typename C, typename EqualityComparable>
+bool c_linear_search(const C& c, EqualityComparable&& value) {
+  return linear_search(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<EqualityComparable>(value));
+}
+
+//------------------------------------------------------------------------------
+// <iterator> algorithms
+//------------------------------------------------------------------------------
+
+// c_distance()
+//
+// Container-based version of the <iterator> `std::distance()` function to
+// return the number of elements within a container.
+template <typename C>
+container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
+    const C& c) {
+  return std::distance(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Non-modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_all_of()
+//
+// Container-based version of the <algorithm> `std::all_of()` function to
+// test if all elements within a container satisfy a condition.
+template <typename C, typename Pred>
+bool c_all_of(const C& c, Pred&& pred) {
+  return std::all_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_any_of()
+//
+// Container-based version of the <algorithm> `std::any_of()` function to
+// test if any element in a container fulfills a condition.
+template <typename C, typename Pred>
+bool c_any_of(const C& c, Pred&& pred) {
+  return std::any_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_none_of()
+//
+// Container-based version of the <algorithm> `std::none_of()` function to
+// test if no elements in a container fulfill a condition.
+template <typename C, typename Pred>
+bool c_none_of(const C& c, Pred&& pred) {
+  return std::none_of(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_for_each()
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+template <typename C, typename Function>
+decay_t<Function> c_for_each(C&& c, Function&& f) {
+  return std::for_each(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Function>(f));
+}
+
+// c_find()
+//
+// Container-based version of the <algorithm> `std::find()` function to find
+// the first element containing the passed value within a container value.
+template <typename C, typename T>
+container_algorithm_internal::ContainerIter<C> c_find(C& c, T&& value) {
+  return std::find(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c),
+                   std::forward<T>(value));
+}
+
+// c_find_if()
+//
+// Container-based version of the <algorithm> `std::find_if()` function to find
+// the first element in a container matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if(C& c, Pred&& pred) {
+  return std::find_if(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_find_if_not()
+//
+// Container-based version of the <algorithm> `std::find_if_not()` function to
+// find the first element in a container not matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if_not(C& c,
+                                                             Pred&& pred) {
+  return std::find_if_not(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c),
+                          std::forward<Pred>(pred));
+}
+
+// c_find_end()
+//
+// Container-based version of the <algorithm> `std::find_end()` function to
+// find the last subsequence within a container.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_find_end() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence),
+                       std::forward<BinaryPredicate>(pred));
+}
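+
+// For example, as a quick sketch, `c_find_end()` returns an iterator to the
+// start of the *last* matching subsequence:
+//
+//   std::vector<int> haystack = {1, 2, 1, 2};
+//   std::vector<int> needle = {1, 2};
+//   auto it = absl::c_find_end(haystack, needle);  // points at haystack[2]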
+
+// c_find_first_of()
+//
+// Container-based version of the <algorithm> `std::find_first_of()` function
+// to find the first element within the container that is also within the
+// options container.
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(C1& container,
+                                                                C2& options) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options));
+}
+
+// Overload of c_find_first_of() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(
+    C1& container, C2& options, BinaryPredicate&& pred) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_adjacent_find()
+//
+// Container-based version of the <algorithm> `std::adjacent_find()` function
+// to find equal adjacent elements within a container.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_adjacent_find() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence, BinaryPredicate&& pred) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_count()
+//
+// Container-based version of the <algorithm> `std::count()` function to count
+// values that match within a container.
+template <typename C, typename T>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count(
+    const C& c, T&& value) {
+  return std::count(container_algorithm_internal::c_begin(c),
+                    container_algorithm_internal::c_end(c),
+                    std::forward<T>(value));
+}
+
+// c_count_if()
+//
+// Container-based version of the <algorithm> `std::count_if()` function to
+// count values matching a condition within a container.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
+    const C& c, Pred&& pred) {
+  return std::count_if(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Pred>(pred));
+}
+
+// c_mismatch()
+//
+// Container-based version of the <algorithm> `std::mismatch()` function to
+// return the first element where two ordered containers differ. Applies `==` to
+// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(C1& c1,
+                                                                       C2& c2) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2));
+}
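+
+// For example, as a quick sketch of the length-clamping behavior noted above:
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {1, 9, 3};
+//   auto diff = absl::c_mismatch(a, b);  // diff.first -> a[1],
+//                                        // diff.second -> b[1]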
+
+// Overload of c_mismatch() for using a predicate evaluation other than `==` as
+// the function's test condition. Applies `pred` to the first N elements of
+// `c1` and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(
+    C1& c1, C2& c2, BinaryPredicate pred) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2), pred);
+}
+
+// c_equal()
+//
+// Container-based version of the <algorithm> `std::equal()` function to
+// test whether two containers are equal.
+template <typename C1, typename C2>
+bool c_equal(const C1& c1, const C2& c2) {
+  return std::equal(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_equal() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  return std::equal(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2),
+                    std::forward<BinaryPredicate>(pred));
+}
+
+// c_is_permutation()
+//
+// Container-based version of the <algorithm> `std::is_permutation()` function
+// to test whether a container is a permutation of another.
+template <typename C1, typename C2>
+bool c_is_permutation(const C1& c1, const C2& c2) {
+  return std::is_permutation(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_is_permutation() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  return std::is_permutation(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2),
+                             std::forward<BinaryPredicate>(pred));
+}
+
+// c_search()
+//
+// Container-based version of the <algorithm> `std::search()` function to
+// search a container for a subsequence.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_search() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence),
+                     std::forward<BinaryPredicate>(pred));
+}
+
+// c_search_n()
+//
+// Container-based version of the <algorithm> `std::search_n()` function to
+// search a container for the first sequence of N elements.
+template <typename Sequence, typename Size, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+    Sequence& sequence, Size count, T&& value) {
+  return std::search_n(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence), count,
+                       std::forward<T>(value));
+}
+
+// Overload of c_search_n() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template +container_algorithm_internal::ContainerIter c_search_n( + Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) { + return std::search_n(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), count, + std::forward(value), + std::forward(pred)); +} + +//------------------------------------------------------------------------------ +// Modifying sequence operations +//------------------------------------------------------------------------------ + +// c_copy() +// +// Container-based version of the `std::copy()` function to copy a +// container's elements into an iterator. +template +OutputIterator c_copy(const InputSequence& input, OutputIterator output) { + return std::copy(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), output); +} + +// c_copy_n() +// +// Container-based version of the `std::copy_n()` function to copy a +// container's first N elements into an iterator. +template +OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) { + return std::copy_n(container_algorithm_internal::c_begin(input), n, output); +} + +// c_copy_if() +// +// Container-based version of the `std::copy_if()` function to copy +// a container's elements satisfying some condition into an iterator. +template +OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, + Pred&& pred) { + return std::copy_if(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), output, + std::forward(pred)); +} + +// c_copy_backward() +// +// Container-based version of the `std::copy_backward()` function to +// copy a container's elements in reverse order into an iterator. +template +BidirectionalIterator c_copy_backward(const C& src, + BidirectionalIterator dest) { + return std::copy_backward(container_algorithm_internal::c_begin(src), + container_algorithm_internal::c_end(src), dest); +} + +// c_move() +// +// Container-based version of the `std::move()` function to move +// a container's elements into an iterator. +template +OutputIterator c_move(C&& src, OutputIterator dest) { + return std::move(container_algorithm_internal::c_begin(src), + container_algorithm_internal::c_end(src), dest); +} + +// c_move_backward() +// +// Container-based version of the `std::move_backward()` function to +// move a container's elements into an iterator in reverse order. +template +BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) { + return std::move_backward(container_algorithm_internal::c_begin(src), + container_algorithm_internal::c_end(src), dest); +} + +// c_swap_ranges() +// +// Container-based version of the `std::swap_ranges()` function to +// swap a container's elements with another container's elements. Swaps the +// first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). 
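+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {4, 5};
+//   // Only the first min(3, 2) = 2 elements are swapped.
+//   absl::c_swap_ranges(a, b);
+//   // a == {4, 5, 3}, b == {1, 2}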
+template +container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + using std::swap; + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { + swap(*first1, *first2); + } + return first2; +} + +// c_transform() +// +// Container-based version of the `std::transform()` function to +// transform a container's elements using the unary operation, storing the +// result in an iterator pointing to the last transformed element in the output +// range. +template +OutputIterator c_transform(const InputSequence& input, OutputIterator output, + UnaryOp&& unary_op) { + return std::transform(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), output, + std::forward(unary_op)); +} + +// Overload of c_transform() for performing a transformation using a binary +// predicate. Applies `binary_op` to the first N elements of `c1` and `c2`, +// where N = min(size(c1), size(c2)). +template +OutputIterator c_transform(const InputSequence1& input1, + const InputSequence2& input2, OutputIterator output, + BinaryOp&& binary_op) { + auto first1 = container_algorithm_internal::c_begin(input1); + auto last1 = container_algorithm_internal::c_end(input1); + auto first2 = container_algorithm_internal::c_begin(input2); + auto last2 = container_algorithm_internal::c_end(input2); + for (; first1 != last1 && first2 != last2; + ++first1, (void)++first2, ++output) { + *output = binary_op(*first1, *first2); + } + + return output; +} + +// c_replace() +// +// Container-based version of the `std::replace()` function to +// replace a container's elements of some value with a new value. The container +// is modified in place. +template +void c_replace(Sequence& sequence, const T& old_value, const T& new_value) { + std::replace(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), old_value, + new_value); +} + +// c_replace_if() +// +// Container-based version of the `std::replace_if()` function to +// replace a container's elements of some value with a new value based on some +// condition. The container is modified in place. +template +void c_replace_if(C& c, Pred&& pred, T&& new_value) { + std::replace_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred), std::forward(new_value)); +} + +// c_replace_copy() +// +// Container-based version of the `std::replace_copy()` function to +// replace a container's elements of some value with a new value and return the +// results within an iterator. +template +OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, + T&& new_value) { + return std::replace_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(old_value), + std::forward(new_value)); +} + +// c_replace_copy_if() +// +// Container-based version of the `std::replace_copy_if()` function +// to replace a container's elements of some value with a new value based on +// some condition, and return the results within an iterator. 
+template +OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, + const T& new_value) { + return std::replace_copy_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(pred), new_value); +} + +// c_fill() +// +// Container-based version of the `std::fill()` function to fill a +// container with some value. +template +void c_fill(C& c, const T& value) { + std::fill(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), value); +} + +// c_fill_n() +// +// Container-based version of the `std::fill_n()` function to fill +// the first N elements in a container with some value. +template +void c_fill_n(C& c, Size n, const T& value) { + std::fill_n(container_algorithm_internal::c_begin(c), n, value); +} + +// c_generate() +// +// Container-based version of the `std::generate()` function to +// assign a container's elements to the values provided by the given generator. +template +void c_generate(C& c, Generator&& gen) { + std::generate(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(gen)); +} + +// c_generate_n() +// +// Container-based version of the `std::generate_n()` function to +// assign a container's first N elements to the values provided by the given +// generator. +template +container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, + Generator&& gen) { + return std::generate_n(container_algorithm_internal::c_begin(c), n, + std::forward(gen)); +} + +// Note: `c_xx()` container versions for `remove()`, `remove_if()`, +// and `unique()` are omitted, because it's not clear whether or not such +// functions should call erase on their supplied sequences afterwards. Either +// behavior would be surprising for a different set of users. + +// c_remove_copy() +// +// Container-based version of the `std::remove_copy()` function to +// copy a container's elements while removing any elements matching the given +// `value`. +template +OutputIterator c_remove_copy(const C& c, OutputIterator result, + const T& value) { + return std::remove_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + value); +} + +// c_remove_copy_if() +// +// Container-based version of the `std::remove_copy_if()` function +// to copy a container's elements while removing any elements matching the given +// condition. +template +OutputIterator c_remove_copy_if(const C& c, OutputIterator result, + Pred&& pred) { + return std::remove_copy_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(pred)); +} + +// c_unique_copy() +// +// Container-based version of the `std::unique_copy()` function to +// copy a container's elements while removing any elements containing duplicate +// values. +template +OutputIterator c_unique_copy(const C& c, OutputIterator result) { + return std::unique_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result); +} + +// Overload of c_unique_copy() for using a predicate evaluation other than +// `==` for comparing uniqueness of the element values. 
+template +OutputIterator c_unique_copy(const C& c, OutputIterator result, + BinaryPredicate&& pred) { + return std::unique_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(pred)); +} + +// c_reverse() +// +// Container-based version of the `std::reverse()` function to +// reverse a container's elements. +template +void c_reverse(Sequence& sequence) { + std::reverse(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// c_reverse_copy() +// +// Container-based version of the `std::reverse()` function to +// reverse a container's elements and write them to an iterator range. +template +OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) { + return std::reverse_copy(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + result); +} + +// c_rotate() +// +// Container-based version of the `std::rotate()` function to +// shift a container's elements leftward such that the `middle` element becomes +// the first element in the container. +template > +Iterator c_rotate(C& sequence, Iterator middle) { + return absl::rotate(container_algorithm_internal::c_begin(sequence), middle, + container_algorithm_internal::c_end(sequence)); +} + +// c_rotate_copy() +// +// Container-based version of the `std::rotate_copy()` function to +// shift a container's elements leftward such that the `middle` element becomes +// the first element in a new iterator range. +template +OutputIterator c_rotate_copy( + const C& sequence, + container_algorithm_internal::ContainerIter middle, + OutputIterator result) { + return std::rotate_copy(container_algorithm_internal::c_begin(sequence), + middle, container_algorithm_internal::c_end(sequence), + result); +} + +// c_shuffle() +// +// Container-based version of the `std::shuffle()` function to +// randomly shuffle elements within the container using a `gen()` uniform random +// number generator. +template +void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) { + std::shuffle(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(gen)); +} + +// c_sample() +// +// Container-based version of the `std::sample()` function to +// randomly sample elements from the container without replacement using a +// `gen()` uniform random number generator and write them to an iterator range. +template +OutputIterator c_sample(const C& c, OutputIterator result, Distance n, + UniformRandomBitGenerator&& gen) { +#if defined(__cpp_lib_sample) && __cpp_lib_sample >= 201603L + return std::sample(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, n, + std::forward(gen)); +#else + // Fall back to a stable selection-sampling implementation. 
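+  // Editor's note: the loop below is selection sampling (Knuth, TAOCP vol. 2,
+  // Algorithm S): each remaining element is kept with probability
+  // n / unsampled_elements, which selects exactly `n` elements uniformly at
+  // random while preserving their original order.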
+  auto first = container_algorithm_internal::c_begin(c);
+  Distance unsampled_elements = c_distance(c);
+  n = (std::min)(n, unsampled_elements);
+  for (; n != 0; ++first) {
+    Distance r =
+        std::uniform_int_distribution<Distance>(0, --unsampled_elements)(gen);
+    if (r < n) {
+      *result++ = *first;
+      --n;
+    }
+  }
+  return result;
+#endif
+}
+
+//------------------------------------------------------------------------------
+// Partition functions
+//------------------------------------------------------------------------------
+
+// c_is_partitioned()
+//
+// Container-based version of the `std::is_partitioned()` function
+// to test whether all elements in the container for which `pred` returns
+// `true` precede those for which `pred` is `false`.
+template <typename C, typename Pred>
+bool c_is_partitioned(const C& c, Pred&& pred) {
+  return std::is_partitioned(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c),
+                             std::forward<Pred>(pred));
+}
+
+// c_partition()
+//
+// Container-based version of the `std::partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// returning an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition(C& c, Pred&& pred) {
+  return std::partition(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c),
+                        std::forward<Pred>(pred));
+}
+
+// c_stable_partition()
+//
+// Container-based version of the `std::stable_partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// preserving the relative ordering between the two groups. The function
+// returns an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_stable_partition(C& c,
+                                                                  Pred&& pred) {
+  return std::stable_partition(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Pred>(pred));
+}
+
+// c_partition_copy()
+//
+// Container-based version of the `std::partition_copy()` function
+// to partition a container's elements and return them into two iterators: one
+// for which `pred` returns `true`, and one for which `pred` returns `false`.
+template <typename C, typename OutputIterator1, typename OutputIterator2,
+          typename Pred>
+std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
+    const C& c, OutputIterator1 out_true, OutputIterator2 out_false,
+    Pred&& pred) {
+  return std::partition_copy(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c), out_true,
+                             out_false, std::forward<Pred>(pred));
+}
+
+// c_partition_point()
+//
+// Container-based version of the `std::partition_point()` function
+// to return the first element of an already partitioned container for which
+// the given `pred` is not `true`.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
+                                                                 Pred&& pred) {
+  return std::partition_point(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c),
+                              std::forward<Pred>(pred));
+}
+
+//------------------------------------------------------------------------------
+// Sorting functions
+//------------------------------------------------------------------------------
+
+// c_sort()
+//
+// Container-based version of the `std::sort()` function
+// to sort elements in ascending order of their values.
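+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {3, 1, 2};
+//   absl::c_sort(v);  // v == {1, 2, 3}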
+template +void c_sort(C& c) { + std::sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_sort() for performing a `comp` comparison other than the +// default `operator<`. +template +void c_sort(C& c, LessThan&& comp) { + std::sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_stable_sort() +// +// Container-based version of the `std::stable_sort()` function +// to sort elements in ascending order of their values, preserving the order +// of equivalents. +template +void c_stable_sort(C& c) { + std::stable_sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_stable_sort() for performing a `comp` comparison other than the +// default `operator<`. +template +void c_stable_sort(C& c, LessThan&& comp) { + std::stable_sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_is_sorted() +// +// Container-based version of the `std::is_sorted()` function +// to evaluate whether the given container is sorted in ascending order. +template +bool c_is_sorted(const C& c) { + return std::is_sorted(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// c_is_sorted() overload for performing a `comp` comparison other than the +// default `operator<`. +template +bool c_is_sorted(const C& c, LessThan&& comp) { + return std::is_sorted(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_partial_sort() +// +// Container-based version of the `std::partial_sort()` function +// to rearrange elements within a container such that elements before `middle` +// are sorted in ascending order. +template +void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle) { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_partial_sort() for performing a `comp` comparison other than +// the default `operator<`. +template +void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle, + LessThan&& comp) { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_partial_sort_copy() +// +// Container-based version of the `std::partial_sort_copy()` +// function to sort the elements in the given range `result` within the larger +// `sequence` in ascending order (and using `result` as the output parameter). +// At most min(result.last - result.first, sequence.last - sequence.first) +// elements from the sequence will be stored in the result. +template +container_algorithm_internal::ContainerIter +c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { + return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(result), + container_algorithm_internal::c_end(result)); +} + +// Overload of c_partial_sort_copy() for performing a `comp` comparison other +// than the default `operator<`. 
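+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {4, 1, 3, 2};
+//   std::vector<int> top(2);  // output must be pre-sized
+//   // Copy the two largest elements, in descending order, into `top`.
+//   absl::c_partial_sort_copy(v, top, std::greater<int>());
+//   // top == {4, 3}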
+template +container_algorithm_internal::ContainerIter +c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, + LessThan&& comp) { + return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(result), + container_algorithm_internal::c_end(result), + std::forward(comp)); +} + +// c_is_sorted_until() +// +// Container-based version of the `std::is_sorted_until()` function +// to return the first element within a container that is not sorted in +// ascending order as an iterator. +template +container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { + return std::is_sorted_until(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_is_sorted_until() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIter c_is_sorted_until( + C& c, LessThan&& comp) { + return std::is_sorted_until(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_nth_element() +// +// Container-based version of the `std::nth_element()` function +// to rearrange the elements within a container such that the `nth` element +// would be in that position in an ordered sequence; other elements may be in +// any order, except that all preceding `nth` will be less than that element, +// and all following `nth` will be greater than that element. +template +void c_nth_element( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter nth) { + std::nth_element(container_algorithm_internal::c_begin(sequence), nth, + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_nth_element() for performing a `comp` comparison other than +// the default `operator<`. +template +void c_nth_element( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter nth, + LessThan&& comp) { + std::nth_element(container_algorithm_internal::c_begin(sequence), nth, + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Binary Search +//------------------------------------------------------------------------------ + +// c_lower_bound() +// +// Container-based version of the `std::lower_bound()` function +// to return an iterator pointing to the first element in a sorted container +// which does not compare less than `value`. +template +container_algorithm_internal::ContainerIter c_lower_bound( + Sequence& sequence, const T& value) { + return std::lower_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), value); +} + +// Overload of c_lower_bound() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIter c_lower_bound( + Sequence& sequence, const T& value, LessThan&& comp) { + return std::lower_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), value, + std::forward(comp)); +} + +// c_upper_bound() +// +// Container-based version of the `std::upper_bound()` function +// to return an iterator pointing to the first element in a sorted container +// which is greater than `value`. 
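+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {1, 2, 2, 3};
+//   auto lo = absl::c_lower_bound(v, 2);  // v.begin() + 1 (first 2)
+//   auto hi = absl::c_upper_bound(v, 2);  // v.begin() + 3 (the 3)
+//   // [lo, hi) spans the run of 2s.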
+template +container_algorithm_internal::ContainerIter c_upper_bound( + Sequence& sequence, const T& value) { + return std::upper_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), value); +} + +// Overload of c_upper_bound() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIter c_upper_bound( + Sequence& sequence, const T& value, LessThan&& comp) { + return std::upper_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), value, + std::forward(comp)); +} + +// c_equal_range() +// +// Container-based version of the `std::equal_range()` function +// to return an iterator pair pointing to the first and last elements in a +// sorted container which compare equal to `value`. +template +container_algorithm_internal::ContainerIterPairType +c_equal_range(Sequence& sequence, const T& value) { + return std::equal_range(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), value); +} + +// Overload of c_equal_range() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIterPairType +c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) { + return std::equal_range(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), value, + std::forward(comp)); +} + +// c_binary_search() +// +// Container-based version of the `std::binary_search()` function +// to test if any element in the sorted container contains a value equivalent to +// 'value'. +template +bool c_binary_search(const Sequence& sequence, const T& value) { + return std::binary_search(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + value); +} + +// Overload of c_binary_search() for performing a `comp` comparison other than +// the default `operator<`. +template +bool c_binary_search(const Sequence& sequence, const T& value, + LessThan&& comp) { + return std::binary_search(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + value, std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Merge functions +//------------------------------------------------------------------------------ + +// c_merge() +// +// Container-based version of the `std::merge()` function +// to merge two sorted containers into a single sorted iterator. +template +OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { + return std::merge(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), result); +} + +// Overload of c_merge() for performing a `comp` comparison other than +// the default `operator<`. +template +OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, + LessThan&& comp) { + return std::merge(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), result, + std::forward(comp)); +} + +// c_inplace_merge() +// +// Container-based version of the `std::inplace_merge()` function +// to merge a supplied iterator `middle` into a container. 
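+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {1, 3, 5, 2, 4, 6};
+//   // Both halves are sorted; merge them in place around the midpoint.
+//   absl::c_inplace_merge(v, v.begin() + 3);
+//   // v == {1, 2, 3, 4, 5, 6}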
+template +void c_inplace_merge(C& c, + container_algorithm_internal::ContainerIter middle) { + std::inplace_merge(container_algorithm_internal::c_begin(c), middle, + container_algorithm_internal::c_end(c)); +} + +// Overload of c_inplace_merge() for performing a merge using a `comp` other +// than `operator<`. +template +void c_inplace_merge(C& c, + container_algorithm_internal::ContainerIter middle, + LessThan&& comp) { + std::inplace_merge(container_algorithm_internal::c_begin(c), middle, + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_includes() +// +// Container-based version of the `std::includes()` function +// to test whether a sorted container `c1` entirely contains another sorted +// container `c2`. +template +bool c_includes(const C1& c1, const C2& c2) { + return std::includes(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2)); +} + +// Overload of c_includes() for performing a merge using a `comp` other than +// `operator<`. +template +bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) { + return std::includes(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), + std::forward(comp)); +} + +// c_set_union() +// +// Container-based version of the `std::set_union()` function +// to return an iterator containing the union of two containers; duplicate +// values are not copied into the output. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { + return std::set_union(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_union() for performing a merge using a `comp` other than +// `operator<`. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, + LessThan&& comp) { + return std::set_union(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +// c_set_intersection() +// +// Container-based version of the `std::set_intersection()` function +// to return an iterator containing the intersection of two sorted containers. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_intersection(const C1& c1, const C2& c2, + OutputIterator output) { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using operator<. 
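+  // Editor's note: `assert` compiles to a no-op when NDEBUG is defined, so
+  // these checks cost nothing in release builds.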
+ assert(absl::c_is_sorted(c1)); + assert(absl::c_is_sorted(c2)); + return std::set_intersection(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_intersection() for performing a merge using a `comp` other +// than `operator<`. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_intersection(const C1& c1, const C2& c2, + OutputIterator output, LessThan&& comp) { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using the same comparator. + assert(absl::c_is_sorted(c1, comp)); + assert(absl::c_is_sorted(c2, comp)); + return std::set_intersection(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +// c_set_difference() +// +// Container-based version of the `std::set_difference()` function +// to return an iterator containing elements present in the first container but +// not in the second. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_difference(const C1& c1, const C2& c2, + OutputIterator output) { + return std::set_difference(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_difference() for performing a merge using a `comp` other +// than `operator<`. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_difference(const C1& c1, const C2& c2, + OutputIterator output, LessThan&& comp) { + return std::set_difference(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +// c_set_symmetric_difference() +// +// Container-based version of the `std::set_symmetric_difference()` +// function to return an iterator containing elements present in either one +// container or the other, but not both. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, + OutputIterator output) { + return std::set_symmetric_difference( + container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_symmetric_difference() for performing a merge using a +// `comp` other than `operator<`. 
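+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> a = {3, 2, 1};
+//   std::vector<int> b = {4, 2};
+//   std::vector<int> out;
+//   // Both inputs are sorted descending, so compare with `>`.
+//   absl::c_set_symmetric_difference(a, b, std::back_inserter(out),
+//                                    std::greater<int>());
+//   // out == {4, 3, 1}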
+template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, + OutputIterator output, + LessThan&& comp) { + return std::set_symmetric_difference( + container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Heap functions +//------------------------------------------------------------------------------ + +// c_push_heap() +// +// Container-based version of the `std::push_heap()` function +// to push a value onto a container heap. +template +void c_push_heap(RandomAccessContainer& sequence) { + std::push_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_push_heap() for performing a push operation on a heap using a +// `comp` other than `operator<`. +template +void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::push_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_pop_heap() +// +// Container-based version of the `std::pop_heap()` function +// to pop a value from a heap container. +template +void c_pop_heap(RandomAccessContainer& sequence) { + std::pop_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_pop_heap() for performing a pop operation on a heap using a +// `comp` other than `operator<`. +template +void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::pop_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_make_heap() +// +// Container-based version of the `std::make_heap()` function +// to make a container a heap. +template +void c_make_heap(RandomAccessContainer& sequence) { + std::make_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_make_heap() for performing heap comparisons using a +// `comp` other than `operator<` +template +void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::make_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_sort_heap() +// +// Container-based version of the `std::sort_heap()` function +// to sort a heap into ascending order (after which it is no longer a heap). +template +void c_sort_heap(RandomAccessContainer& sequence) { + std::sort_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_sort_heap() for performing heap comparisons using a +// `comp` other than `operator<` +template +void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::sort_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_is_heap() +// +// Container-based version of the `std::is_heap()` function +// to check whether the given container is a heap. 
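+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {3, 1, 4, 1, 5};
+//   absl::c_make_heap(v);          // max-heap; v.front() == 5
+//   bool ok = absl::c_is_heap(v);  // ok == true
+//   absl::c_pop_heap(v);           // moves 5 to v.back()
+//   v.pop_back();                  // removes it from the heap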
+template +bool c_is_heap(const RandomAccessContainer& sequence) { + return std::is_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_is_heap() for performing heap comparisons using a +// `comp` other than `operator<` +template +bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { + return std::is_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_is_heap_until() +// +// Container-based version of the `std::is_heap_until()` function +// to find the first element in a given container which is not in heap order. +template +container_algorithm_internal::ContainerIter +c_is_heap_until(RandomAccessContainer& sequence) { + return std::is_heap_until(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_is_heap_until() for performing heap comparisons using a +// `comp` other than `operator<` +template +container_algorithm_internal::ContainerIter +c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { + return std::is_heap_until(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Min/max +//------------------------------------------------------------------------------ + +// c_min_element() +// +// Container-based version of the `std::min_element()` function +// to return an iterator pointing to the element with the smallest value, using +// `operator<` to make the comparisons. +template +container_algorithm_internal::ContainerIter c_min_element( + Sequence& sequence) { + return std::min_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_min_element() for performing a `comp` comparison other than +// `operator<`. +template +container_algorithm_internal::ContainerIter c_min_element( + Sequence& sequence, LessThan&& comp) { + return std::min_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_max_element() +// +// Container-based version of the `std::max_element()` function +// to return an iterator pointing to the element with the largest value, using +// `operator<` to make the comparisons. +template +container_algorithm_internal::ContainerIter c_max_element( + Sequence& sequence) { + return std::max_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_max_element() for performing a `comp` comparison other than +// `operator<`. +template +container_algorithm_internal::ContainerIter c_max_element( + Sequence& sequence, LessThan&& comp) { + return std::max_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_minmax_element() +// +// Container-based version of the `std::minmax_element()` function +// to return a pair of iterators pointing to the elements containing the +// smallest and largest values, respectively, using `operator<` to make the +// comparisons. 
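+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {2, 5, 1, 4};
+//   auto mm = absl::c_minmax_element(v);
+//   // *mm.first == 1, *mm.second == 5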
+template +container_algorithm_internal::ContainerIterPairType c_minmax_element( + C& c) { + return std::minmax_element(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_minmax_element() for performing `comp` comparisons other than +// `operator<`. +template +container_algorithm_internal::ContainerIterPairType c_minmax_element( + C& c, LessThan&& comp) { + return std::minmax_element(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Lexicographical Comparisons +//------------------------------------------------------------------------------ + +// c_lexicographical_compare() +// +// Container-based version of the `std::lexicographical_compare()` +// function to lexicographically compare (e.g. sort words alphabetically) two +// container sequences. The comparison is performed using `operator<`. Note +// that capital letters ("A-Z") have ASCII values less than lowercase letters +// ("a-z"). +template +bool c_lexicographical_compare(const Sequence1& sequence1, + const Sequence2& sequence2) { + return std::lexicographical_compare( + container_algorithm_internal::c_begin(sequence1), + container_algorithm_internal::c_end(sequence1), + container_algorithm_internal::c_begin(sequence2), + container_algorithm_internal::c_end(sequence2)); +} + +// Overload of c_lexicographical_compare() for performing a lexicographical +// comparison using a `comp` operator instead of `operator<`. +template +bool c_lexicographical_compare(const Sequence1& sequence1, + const Sequence2& sequence2, LessThan&& comp) { + return std::lexicographical_compare( + container_algorithm_internal::c_begin(sequence1), + container_algorithm_internal::c_end(sequence1), + container_algorithm_internal::c_begin(sequence2), + container_algorithm_internal::c_end(sequence2), + std::forward(comp)); +} + +// c_next_permutation() +// +// Container-based version of the `std::next_permutation()` function +// to rearrange a container's elements into the next lexicographically greater +// permutation. +template +bool c_next_permutation(C& c) { + return std::next_permutation(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_next_permutation() for performing a lexicographical +// comparison using a `comp` operator instead of `operator<`. +template +bool c_next_permutation(C& c, LessThan&& comp) { + return std::next_permutation(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_prev_permutation() +// +// Container-based version of the `std::prev_permutation()` function +// to rearrange a container's elements into the next lexicographically lesser +// permutation. +template +bool c_prev_permutation(C& c) { + return std::prev_permutation(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_prev_permutation() for performing a lexicographical +// comparison using a `comp` operator instead of `operator<`. 
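+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {1, 2, 3};
+//   absl::c_next_permutation(v);  // v == {1, 3, 2}
+//   absl::c_prev_permutation(v);  // back to {1, 2, 3}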
+template <typename C, typename LessThan>
+bool c_prev_permutation(C& c, LessThan&& comp) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <numeric> algorithms
+//------------------------------------------------------------------------------
+
+// c_iota()
+//
+// Container-based version of the `std::iota()` function
+// to compute successive values of `value`, as if incremented with `++value`
+// after each element is written, and write them to the container.
+template <typename Sequence, typename T>
+void c_iota(Sequence& sequence, const T& value) {
+  std::iota(container_algorithm_internal::c_begin(sequence),
+            container_algorithm_internal::c_end(sequence), value);
+}
+
+// c_accumulate()
+//
+// Container-based version of the `std::accumulate()` function
+// to accumulate the element values of a container to `init` and return that
+// accumulation by value.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence, typename T>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init));
+}
+
+// Overload of c_accumulate() for using a binary operation other than
+// addition for computing the accumulation.
+template <typename Sequence, typename T, typename BinaryOp>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
+                        BinaryOp&& binary_op) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init),
+                         std::forward<BinaryOp>(binary_op));
+}
+
+// c_inner_product()
+//
+// Container-based version of the `std::inner_product()` function
+// to compute the cumulative inner product of container element pairs.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence1, typename Sequence2, typename T>
+decay_t<T> c_inner_product(const Sequence1& factors1,
+                           const Sequence2& factors2, T&& sum) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum));
+}
+
+// Overload of c_inner_product() for using binary operations other than
+// `operator+` (for computing the accumulation) and `operator*` (for computing
+// the product between the two container's element pair).
+template <typename Sequence1, typename Sequence2, typename T,
+          typename BinaryOp1, typename BinaryOp2>
+decay_t<T> c_inner_product(const Sequence1& factors1,
+                           const Sequence2& factors2, T&& sum, BinaryOp1&& op1,
+                           BinaryOp2&& op2) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum), std::forward<BinaryOp1>(op1),
+                            std::forward<BinaryOp2>(op2));
+}
+
+// c_adjacent_difference()
+//
+// Container-based version of the `std::adjacent_difference()`
+// function to compute the difference between each element and the one
+// preceding it and write it to an iterator.
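+//
+// A short usage sketch (editor's illustration, not upstream documentation):
+//
+//   std::vector<int> v = {2, 4, 7};
+//   std::vector<int> out;
+//   absl::c_adjacent_difference(v, std::back_inserter(out));
+//   // out == {2, 2, 3}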
+template +OutputIt c_adjacent_difference(const InputSequence& input, + OutputIt output_first) { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first); +} + +// Overload of c_adjacent_difference() for using a binary operation other than +// subtraction to compute the adjacent difference. +template +OutputIt c_adjacent_difference(const InputSequence& input, + OutputIt output_first, BinaryOp&& op) { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first, std::forward(op)); +} + +// c_partial_sum() +// +// Container-based version of the `std::partial_sum()` function +// to compute the partial sum of the elements in a sequence and write them +// to an iterator. The partial sum is the sum of all element values so far in +// the sequence. +template +OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) { + return std::partial_sum(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first); +} + +// Overload of c_partial_sum() for using a binary operation other than addition +// to compute the "partial sum". +template +OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, + BinaryOp&& op) { + return std::partial_sum(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first, std::forward(op)); +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_ALGORITHM_CONTAINER_H_ diff --git a/third_party/absl/absl/algorithm/container_test.cc b/third_party/absl/absl/algorithm/container_test.cc new file mode 100644 index 000000000..c01f5fc01 --- /dev/null +++ b/third_party/absl/absl/algorithm/container_test.cc @@ -0,0 +1,1147 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/algorithm/container.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/casts.h" +#include "absl/base/macros.h" +#include "absl/memory/memory.h" +#include "absl/types/span.h" + +namespace { + +using ::testing::Each; +using ::testing::ElementsAre; +using ::testing::Gt; +using ::testing::IsNull; +using ::testing::IsSubsetOf; +using ::testing::Lt; +using ::testing::Pointee; +using ::testing::SizeIs; +using ::testing::Truly; +using ::testing::UnorderedElementsAre; + +// Most of these tests just check that the code compiles, not that it +// does the right thing. That's fine since the functions just forward +// to the STL implementation. 
+class NonMutatingTest : public testing::Test { + protected: + std::unordered_set container_ = {1, 2, 3}; + std::list sequence_ = {1, 2, 3}; + std::vector vector_ = {1, 2, 3}; + int array_[3] = {1, 2, 3}; +}; + +struct AccumulateCalls { + void operator()(int value) { calls.push_back(value); } + std::vector calls; +}; + +bool Predicate(int value) { return value < 3; } +bool BinPredicate(int v1, int v2) { return v1 < v2; } +bool Equals(int v1, int v2) { return v1 == v2; } +bool IsOdd(int x) { return x % 2 != 0; } + +TEST_F(NonMutatingTest, Distance) { + EXPECT_EQ(container_.size(), + static_cast(absl::c_distance(container_))); + EXPECT_EQ(sequence_.size(), static_cast(absl::c_distance(sequence_))); + EXPECT_EQ(vector_.size(), static_cast(absl::c_distance(vector_))); + EXPECT_EQ(ABSL_ARRAYSIZE(array_), + static_cast(absl::c_distance(array_))); + + // Works with a temporary argument. + EXPECT_EQ(vector_.size(), + static_cast(absl::c_distance(std::vector(vector_)))); +} + +TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) { + // Works with classes which have custom ADL-selected overloads of std::begin + // and std::end. + std::initializer_list a = {1, 2, 3}; + std::valarray b = {1, 2, 3}; + EXPECT_EQ(3, absl::c_distance(a)); + EXPECT_EQ(3, absl::c_distance(b)); + + // It is assumed that other c_* functions use the same mechanism for + // ADL-selecting begin/end overloads. +} + +TEST_F(NonMutatingTest, ForEach) { + AccumulateCalls c = absl::c_for_each(container_, AccumulateCalls()); + // Don't rely on the unordered_set's order. + std::sort(c.calls.begin(), c.calls.end()); + EXPECT_EQ(vector_, c.calls); + + // Works with temporary container, too. + AccumulateCalls c2 = + absl::c_for_each(std::unordered_set(container_), AccumulateCalls()); + std::sort(c2.calls.begin(), c2.calls.end()); + EXPECT_EQ(vector_, c2.calls); +} + +TEST_F(NonMutatingTest, FindReturnsCorrectType) { + auto it = absl::c_find(container_, 3); + EXPECT_EQ(3, *it); + absl::c_find(absl::implicit_cast&>(sequence_), 3); +} + +TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); } + +TEST_F(NonMutatingTest, FindIfNot) { + absl::c_find_if_not(container_, Predicate); +} + +TEST_F(NonMutatingTest, FindEnd) { + absl::c_find_end(sequence_, vector_); + absl::c_find_end(vector_, sequence_); +} + +TEST_F(NonMutatingTest, FindEndWithPredicate) { + absl::c_find_end(sequence_, vector_, BinPredicate); + absl::c_find_end(vector_, sequence_, BinPredicate); +} + +TEST_F(NonMutatingTest, FindFirstOf) { + absl::c_find_first_of(container_, sequence_); + absl::c_find_first_of(sequence_, container_); +} + +TEST_F(NonMutatingTest, FindFirstOfWithPredicate) { + absl::c_find_first_of(container_, sequence_, BinPredicate); + absl::c_find_first_of(sequence_, container_, BinPredicate); +} + +TEST_F(NonMutatingTest, AdjacentFind) { absl::c_adjacent_find(sequence_); } + +TEST_F(NonMutatingTest, AdjacentFindWithPredicate) { + absl::c_adjacent_find(sequence_, BinPredicate); +} + +TEST_F(NonMutatingTest, Count) { EXPECT_EQ(1, absl::c_count(container_, 3)); } + +TEST_F(NonMutatingTest, CountIf) { + EXPECT_EQ(2, absl::c_count_if(container_, Predicate)); + const std::unordered_set& const_container = container_; + EXPECT_EQ(2, absl::c_count_if(const_container, Predicate)); +} + +TEST_F(NonMutatingTest, Mismatch) { + // Testing necessary as absl::c_mismatch executes logic. 
+ { + auto result = absl::c_mismatch(vector_, sequence_); + EXPECT_EQ(result.first, vector_.end()); + EXPECT_EQ(result.second, sequence_.end()); + } + { + auto result = absl::c_mismatch(sequence_, vector_); + EXPECT_EQ(result.first, sequence_.end()); + EXPECT_EQ(result.second, vector_.end()); + } + + sequence_.back() = 5; + { + auto result = absl::c_mismatch(vector_, sequence_); + EXPECT_EQ(result.first, std::prev(vector_.end())); + EXPECT_EQ(result.second, std::prev(sequence_.end())); + } + { + auto result = absl::c_mismatch(sequence_, vector_); + EXPECT_EQ(result.first, std::prev(sequence_.end())); + EXPECT_EQ(result.second, std::prev(vector_.end())); + } + + sequence_.pop_back(); + { + auto result = absl::c_mismatch(vector_, sequence_); + EXPECT_EQ(result.first, std::prev(vector_.end())); + EXPECT_EQ(result.second, sequence_.end()); + } + { + auto result = absl::c_mismatch(sequence_, vector_); + EXPECT_EQ(result.first, sequence_.end()); + EXPECT_EQ(result.second, std::prev(vector_.end())); + } + { + struct NoNotEquals { + constexpr bool operator==(NoNotEquals) const { return true; } + constexpr bool operator!=(NoNotEquals) const = delete; + }; + std::vector first; + std::list second; + + // Check this still compiles. + absl::c_mismatch(first, second); + } +} + +TEST_F(NonMutatingTest, MismatchWithPredicate) { + // Testing necessary as absl::c_mismatch executes logic. + { + auto result = absl::c_mismatch(vector_, sequence_, BinPredicate); + EXPECT_EQ(result.first, vector_.begin()); + EXPECT_EQ(result.second, sequence_.begin()); + } + { + auto result = absl::c_mismatch(sequence_, vector_, BinPredicate); + EXPECT_EQ(result.first, sequence_.begin()); + EXPECT_EQ(result.second, vector_.begin()); + } + + sequence_.front() = 0; + { + auto result = absl::c_mismatch(vector_, sequence_, BinPredicate); + EXPECT_EQ(result.first, vector_.begin()); + EXPECT_EQ(result.second, sequence_.begin()); + } + { + auto result = absl::c_mismatch(sequence_, vector_, BinPredicate); + EXPECT_EQ(result.first, std::next(sequence_.begin())); + EXPECT_EQ(result.second, std::next(vector_.begin())); + } + + sequence_.clear(); + { + auto result = absl::c_mismatch(vector_, sequence_, BinPredicate); + EXPECT_EQ(result.first, vector_.begin()); + EXPECT_EQ(result.second, sequence_.end()); + } + { + auto result = absl::c_mismatch(sequence_, vector_, BinPredicate); + EXPECT_EQ(result.first, sequence_.end()); + EXPECT_EQ(result.second, vector_.begin()); + } +} + +TEST_F(NonMutatingTest, Equal) { + EXPECT_TRUE(absl::c_equal(vector_, sequence_)); + EXPECT_TRUE(absl::c_equal(sequence_, vector_)); + EXPECT_TRUE(absl::c_equal(sequence_, array_)); + EXPECT_TRUE(absl::c_equal(array_, vector_)); + + // Test that behavior appropriately differs from that of equal(). + std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_equal(vector_plus, sequence_)); + EXPECT_FALSE(absl::c_equal(sequence_, vector_plus)); + EXPECT_FALSE(absl::c_equal(array_, vector_plus)); +} + +TEST_F(NonMutatingTest, EqualWithPredicate) { + EXPECT_TRUE(absl::c_equal(vector_, sequence_, Equals)); + EXPECT_TRUE(absl::c_equal(sequence_, vector_, Equals)); + EXPECT_TRUE(absl::c_equal(array_, sequence_, Equals)); + EXPECT_TRUE(absl::c_equal(vector_, array_, Equals)); + + // Test that behavior appropriately differs from that of equal(). 
+ std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_equal(vector_plus, sequence_, Equals)); + EXPECT_FALSE(absl::c_equal(sequence_, vector_plus, Equals)); + EXPECT_FALSE(absl::c_equal(vector_plus, array_, Equals)); +} + +TEST_F(NonMutatingTest, IsPermutation) { + auto vector_permut_ = vector_; + std::next_permutation(vector_permut_.begin(), vector_permut_.end()); + EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_)); + EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_)); + + // Test that behavior appropriately differs from that of is_permutation(). + std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_)); + EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus)); +} + +TEST_F(NonMutatingTest, IsPermutationWithPredicate) { + auto vector_permut_ = vector_; + std::next_permutation(vector_permut_.begin(), vector_permut_.end()); + EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_, Equals)); + EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_, Equals)); + + // Test that behavior appropriately differs from that of is_permutation(). + std::vector vector_plus = {1, 2, 3}; + vector_plus.push_back(4); + EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_, Equals)); + EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus, Equals)); +} + +TEST_F(NonMutatingTest, Search) { + absl::c_search(sequence_, vector_); + absl::c_search(vector_, sequence_); + absl::c_search(array_, sequence_); +} + +TEST_F(NonMutatingTest, SearchWithPredicate) { + absl::c_search(sequence_, vector_, BinPredicate); + absl::c_search(vector_, sequence_, BinPredicate); +} + +TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); } + +TEST_F(NonMutatingTest, SearchNWithPredicate) { + absl::c_search_n(sequence_, 3, 1, BinPredicate); +} + +TEST_F(NonMutatingTest, LowerBound) { + std::list::iterator i = absl::c_lower_bound(sequence_, 3); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(2, std::distance(sequence_.begin(), i)); + EXPECT_EQ(3, *i); +} + +TEST_F(NonMutatingTest, LowerBoundWithPredicate) { + std::vector v(vector_); + std::sort(v.begin(), v.end(), std::greater()); + std::vector::iterator i = absl::c_lower_bound(v, 3, std::greater()); + EXPECT_TRUE(i == v.begin()); + EXPECT_EQ(3, *i); +} + +TEST_F(NonMutatingTest, UpperBound) { + std::list::iterator i = absl::c_upper_bound(sequence_, 1); + ASSERT_TRUE(i != sequence_.end()); + EXPECT_EQ(1, std::distance(sequence_.begin(), i)); + EXPECT_EQ(2, *i); +} + +TEST_F(NonMutatingTest, UpperBoundWithPredicate) { + std::vector v(vector_); + std::sort(v.begin(), v.end(), std::greater()); + std::vector::iterator i = absl::c_upper_bound(v, 1, std::greater()); + EXPECT_EQ(3, i - v.begin()); + EXPECT_TRUE(i == v.end()); +} + +TEST_F(NonMutatingTest, EqualRange) { + std::pair::iterator, std::list::iterator> p = + absl::c_equal_range(sequence_, 2); + EXPECT_EQ(1, std::distance(sequence_.begin(), p.first)); + EXPECT_EQ(2, std::distance(sequence_.begin(), p.second)); +} + +TEST_F(NonMutatingTest, EqualRangeArray) { + auto p = absl::c_equal_range(array_, 2); + EXPECT_EQ(1, std::distance(std::begin(array_), p.first)); + EXPECT_EQ(2, std::distance(std::begin(array_), p.second)); +} + +TEST_F(NonMutatingTest, EqualRangeWithPredicate) { + std::vector v(vector_); + std::sort(v.begin(), v.end(), std::greater()); + std::pair::iterator, std::vector::iterator> p = + absl::c_equal_range(v, 2, std::greater()); + EXPECT_EQ(1, 
+
+TEST_F(NonMutatingTest, BinarySearch) {
+  EXPECT_TRUE(absl::c_binary_search(vector_, 2));
+  EXPECT_TRUE(absl::c_binary_search(std::vector<int>(vector_), 2));
+}
+
+TEST_F(NonMutatingTest, BinarySearchWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  EXPECT_TRUE(absl::c_binary_search(v, 2, std::greater<int>()));
+  EXPECT_TRUE(
+      absl::c_binary_search(std::vector<int>(v), 2, std::greater<int>()));
+}
+
+TEST_F(NonMutatingTest, MinElement) {
+  std::list<int>::iterator i = absl::c_min_element(sequence_);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 1);
+}
+
+TEST_F(NonMutatingTest, MinElementWithPredicate) {
+  std::list<int>::iterator i =
+      absl::c_min_element(sequence_, std::greater<int>());
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 3);
+}
+
+TEST_F(NonMutatingTest, MaxElement) {
+  std::list<int>::iterator i = absl::c_max_element(sequence_);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 3);
+}
+
+TEST_F(NonMutatingTest, MaxElementWithPredicate) {
+  std::list<int>::iterator i =
+      absl::c_max_element(sequence_, std::greater<int>());
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 1);
+}
+
+TEST_F(NonMutatingTest, LexicographicalCompare) {
+  EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_));
+
+  std::vector<int> v;
+  v.push_back(1);
+  v.push_back(2);
+  v.push_back(4);
+
+  EXPECT_TRUE(absl::c_lexicographical_compare(sequence_, v));
+  EXPECT_TRUE(absl::c_lexicographical_compare(std::list<int>(sequence_), v));
+}
+
+TEST_F(NonMutatingTest, LexicographicalCopmareWithPredicate) {
+  EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_,
+                                               std::greater<int>()));
+
+  std::vector<int> v;
+  v.push_back(1);
+  v.push_back(2);
+  v.push_back(4);
+
+  EXPECT_TRUE(
+      absl::c_lexicographical_compare(v, sequence_, std::greater<int>()));
+  EXPECT_TRUE(absl::c_lexicographical_compare(
+      std::vector<int>(v), std::list<int>(sequence_), std::greater<int>()));
+}
+
+TEST_F(NonMutatingTest, Includes) {
+  std::set<int> s(vector_.begin(), vector_.end());
+  s.insert(4);
+  EXPECT_TRUE(absl::c_includes(s, vector_));
+}
+
+TEST_F(NonMutatingTest, IncludesWithPredicate) {
+  std::vector<int> v = {3, 2, 1};
+  std::set<int, std::greater<int>> s(v.begin(), v.end());
+  s.insert(4);
+  EXPECT_TRUE(absl::c_includes(s, v, std::greater<int>()));
+}
+
+class NumericMutatingTest : public testing::Test {
+ protected:
+  std::list<int> list_ = {1, 2, 3};
+  std::vector<int> output_;
+};
+
+TEST_F(NumericMutatingTest, Iota) {
+  absl::c_iota(list_, 5);
+  std::list<int> expected{5, 6, 7};
+  EXPECT_EQ(list_, expected);
+}
+
+TEST_F(NonMutatingTest, Accumulate) {
+  EXPECT_EQ(absl::c_accumulate(sequence_, 4), 1 + 2 + 3 + 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateWithBinaryOp) {
+  EXPECT_EQ(absl::c_accumulate(sequence_, 4, std::multiplies<int>()),
+            1 * 2 * 3 * 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateLvalueInit) {
+  int lvalue = 4;
+  EXPECT_EQ(absl::c_accumulate(sequence_, lvalue), 1 + 2 + 3 + 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateWithBinaryOpLvalueInit) {
+  int lvalue = 4;
+  EXPECT_EQ(absl::c_accumulate(sequence_, lvalue, std::multiplies<int>()),
+            1 * 2 * 3 * 4);
+}
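The accumulate coverage above mirrors `std::accumulate` from `<numeric>`; the wrapper only supplies the container's begin/end. A short sketch of the binary-op overload, under the same assumptions (example names are ours):

```cpp
#include <string>
#include <vector>

#include "absl/algorithm/container.h"

int main() {
  std::vector<std::string> words = {"a", "b", "c"};
  // Left fold over the whole container, as with std::accumulate.
  std::string joined = absl::c_accumulate(
      words, std::string(), [](std::string acc, const std::string& w) {
        return acc.empty() ? w : acc + "," + w;
      });
  return joined == "a,b,c" ? 0 : 1;
}
```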
+
+TEST_F(NonMutatingTest, InnerProduct) {
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 1000),
+            1000 + 1 * 1 + 2 * 2 + 3 * 3);
+}
+
+TEST_F(NonMutatingTest, InnerProductWithBinaryOps) {
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 10,
+                                  std::multiplies<int>(), std::plus<int>()),
+            10 * (1 + 1) * (2 + 2) * (3 + 3));
+}
+
+TEST_F(NonMutatingTest, InnerProductLvalueInit) {
+  int lvalue = 1000;
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue),
+            1000 + 1 * 1 + 2 * 2 + 3 * 3);
+}
+
+TEST_F(NonMutatingTest, InnerProductWithBinaryOpsLvalueInit) {
+  int lvalue = 10;
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue,
+                                  std::multiplies<int>(), std::plus<int>()),
+            10 * (1 + 1) * (2 + 2) * (3 + 3));
+}
+
+TEST_F(NumericMutatingTest, AdjacentDifference) {
+  auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_));
+  *last = 1000;
+  std::vector<int> expected{1, 2 - 1, 3 - 2, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, AdjacentDifferenceWithBinaryOp) {
+  auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_),
+                                          std::multiplies<int>());
+  *last = 1000;
+  std::vector<int> expected{1, 2 * 1, 3 * 2, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, PartialSum) {
+  auto last = absl::c_partial_sum(list_, std::back_inserter(output_));
+  *last = 1000;
+  std::vector<int> expected{1, 1 + 2, 1 + 2 + 3, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, PartialSumWithBinaryOp) {
+  auto last = absl::c_partial_sum(list_, std::back_inserter(output_),
+                                  std::multiplies<int>());
+  *last = 1000;
+  std::vector<int> expected{1, 1 * 2, 1 * 2 * 3, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NonMutatingTest, LinearSearch) {
+  EXPECT_TRUE(absl::c_linear_search(container_, 3));
+  EXPECT_FALSE(absl::c_linear_search(container_, 4));
+}
+
+TEST_F(NonMutatingTest, AllOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_FALSE(absl::c_all_of(v, [](int x) { return x > 1; }));
+  EXPECT_TRUE(absl::c_all_of(v, [](int x) { return x > 0; }));
+}
+
+TEST_F(NonMutatingTest, AnyOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_TRUE(absl::c_any_of(v, [](int x) { return x > 2; }));
+  EXPECT_FALSE(absl::c_any_of(v, [](int x) { return x > 5; }));
+}
+
+TEST_F(NonMutatingTest, NoneOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_FALSE(absl::c_none_of(v, [](int x) { return x > 2; }));
+  EXPECT_TRUE(absl::c_none_of(v, [](int x) { return x > 5; }));
+}
+
+TEST_F(NonMutatingTest, MinMaxElementLess) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_, std::less<int>());
+  EXPECT_TRUE(p.first == vector_.begin());
+  EXPECT_TRUE(p.second == vector_.begin() + 2);
+}
+
+TEST_F(NonMutatingTest, MinMaxElementGreater) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_, std::greater<int>());
+  EXPECT_TRUE(p.first == vector_.begin() + 2);
+  EXPECT_TRUE(p.second == vector_.begin());
+}
+
+TEST_F(NonMutatingTest, MinMaxElementNoPredicate) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_);
+  EXPECT_TRUE(p.first == vector_.begin());
+  EXPECT_TRUE(p.second == vector_.begin() + 2);
+}
+
+class SortingTest : public testing::Test {
+ protected:
+  std::list<int> sorted_ = {1, 2, 3, 4};
+  std::list<int> unsorted_ = {2, 4, 1, 3};
+  std::list<int> reversed_ = {4, 3, 2, 1};
+};
+
+TEST_F(SortingTest, IsSorted) {
+  EXPECT_TRUE(absl::c_is_sorted(sorted_));
+  EXPECT_FALSE(absl::c_is_sorted(unsorted_));
+  EXPECT_FALSE(absl::c_is_sorted(reversed_));
+}
+
+TEST_F(SortingTest, IsSortedWithPredicate) {
+  EXPECT_FALSE(absl::c_is_sorted(sorted_, std::greater<int>()));
+  EXPECT_FALSE(absl::c_is_sorted(unsorted_, std::greater<int>()));
+  EXPECT_TRUE(absl::c_is_sorted(reversed_, std::greater<int>()));
+}
+
+TEST_F(SortingTest, IsSortedUntil) {
+  EXPECT_EQ(1, *absl::c_is_sorted_until(unsorted_));
+  EXPECT_EQ(4, *absl::c_is_sorted_until(unsorted_, std::greater<int>()));
+}
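As the predicate variants above show, the comparator handed to `c_is_sorted` or the binary-search wrappers has to match the order the data was actually sorted with. A hedged sketch with our own values:

```cpp
#include <functional>
#include <vector>

#include "absl/algorithm/container.h"

int main() {
  std::vector<int> v = {9, 7, 4, 1};  // sorted descending
  bool desc = absl::c_is_sorted(v, std::greater<int>());  // true
  bool asc = absl::c_is_sorted(v);                        // false
  // Searches need the same comparator as the sort order.
  auto it = absl::c_lower_bound(v, 7, std::greater<int>());
  return (desc && !asc && *it == 7) ? 0 : 1;
}
```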
+
+TEST_F(SortingTest, NthElement) {
+  std::vector<int> unsorted = {2, 4, 1, 3};
+  absl::c_nth_element(unsorted, unsorted.begin() + 2);
+  EXPECT_THAT(unsorted, ElementsAre(Lt(3), Lt(3), 3, Gt(3)));
+  absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater<int>());
+  EXPECT_THAT(unsorted, ElementsAre(Gt(2), Gt(2), 2, Lt(2)));
+}
+
+TEST(MutatingTest, IsPartitioned) {
+  EXPECT_TRUE(
+      absl::c_is_partitioned(std::vector<int>{1, 3, 5, 2, 4, 6}, IsOdd));
+  EXPECT_FALSE(
+      absl::c_is_partitioned(std::vector<int>{1, 2, 3, 4, 5, 6}, IsOdd));
+  EXPECT_FALSE(
+      absl::c_is_partitioned(std::vector<int>{2, 4, 6, 1, 3, 5}, IsOdd));
+}
+
+TEST(MutatingTest, Partition) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_partition(actual, IsOdd);
+  EXPECT_THAT(actual, Truly([](const std::vector<int>& c) {
+                return absl::c_is_partitioned(c, IsOdd);
+              }));
+}
+
+TEST(MutatingTest, StablePartition) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_stable_partition(actual, IsOdd);
+  EXPECT_THAT(actual, ElementsAre(1, 3, 5, 2, 4));
+}
+
+TEST(MutatingTest, PartitionCopy) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  std::vector<int> odds, evens;
+  auto ends = absl::c_partition_copy(initial, back_inserter(odds),
+                                     back_inserter(evens), IsOdd);
+  *ends.first = 7;
+  *ends.second = 6;
+  EXPECT_THAT(odds, ElementsAre(1, 3, 5, 7));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+}
+
+TEST(MutatingTest, PartitionPoint) {
+  const std::vector<int> initial = {1, 3, 5, 2, 4};
+  auto middle = absl::c_partition_point(initial, IsOdd);
+  EXPECT_EQ(2, *middle);
+}
+
+TEST(MutatingTest, CopyMiddle) {
+  const std::vector<int> initial = {4, -1, -2, -3, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::vector<int> expected = {4, 1, 2, 3, 5};
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_copy(input, ++test_list.begin());
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+
+  std::vector<int> test_vector = initial;
+  absl::c_copy(input, test_vector.begin() + 1);
+  EXPECT_EQ(expected, test_vector);
+}
+
+TEST(MutatingTest, CopyFrontInserter) {
+  const std::list<int> initial = {4, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::list<int> expected = {3, 2, 1, 4, 5};
+
+  std::list<int> test_list = initial;
+  absl::c_copy(input, std::front_inserter(test_list));
+  EXPECT_EQ(expected, test_list);
+}
+
+TEST(MutatingTest, CopyBackInserter) {
+  const std::vector<int> initial = {4, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::vector<int> expected = {4, 5, 1, 2, 3};
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_copy(input, std::back_inserter(test_list));
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+
+  std::vector<int> test_vector = initial;
+  absl::c_copy(input, std::back_inserter(test_vector));
+  EXPECT_EQ(expected, test_vector);
+}
+
+TEST(MutatingTest, CopyN) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {1, 2};
+  std::vector<int> actual;
+  absl::c_copy_n(initial, 2, back_inserter(actual));
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, CopyIf) {
+  const std::list<int> input = {1, 2, 3};
+  std::vector<int> output;
+  absl::c_copy_if(input, std::back_inserter(output),
+                  [](int i) { return i != 2; });
+  EXPECT_THAT(output, ElementsAre(1, 3));
+}
+
+TEST(MutatingTest, CopyBackward) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  std::vector<int> expected = {1, 2, 1, 2, 3};
+  absl::c_copy_backward(absl::MakeSpan(actual.data(), 3), actual.end());
+  EXPECT_EQ(expected, actual);
+}
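The copy family above only requires an output iterator for the destination, so the source and destination containers may differ in type. A hedged sketch (names are ours, not from the test file):

```cpp
#include <iterator>
#include <list>
#include <vector>

#include "absl/algorithm/container.h"

int main() {
  const std::list<int> input = {1, 2, 3, 4, 5};
  std::vector<int> evens;
  // Filter from a list into a vector through a back_inserter.
  absl::c_copy_if(input, std::back_inserter(evens),
                  [](int x) { return x % 2 == 0; });
  return evens == std::vector<int>({2, 4}) ? 0 : 1;
}
```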
+
+TEST(MutatingTest, Move) {
+  std::vector<std::unique_ptr<int>> src;
+  src.emplace_back(absl::make_unique<int>(1));
+  src.emplace_back(absl::make_unique<int>(2));
+  src.emplace_back(absl::make_unique<int>(3));
+  src.emplace_back(absl::make_unique<int>(4));
+  src.emplace_back(absl::make_unique<int>(5));
+
+  std::vector<std::unique_ptr<int>> dest = {};
+  absl::c_move(src, std::back_inserter(dest));
+  EXPECT_THAT(src, Each(IsNull()));
+  EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(4),
+                                Pointee(5)));
+}
+
+TEST(MutatingTest, MoveBackward) {
+  std::vector<std::unique_ptr<int>> actual;
+  actual.emplace_back(absl::make_unique<int>(1));
+  actual.emplace_back(absl::make_unique<int>(2));
+  actual.emplace_back(absl::make_unique<int>(3));
+  actual.emplace_back(absl::make_unique<int>(4));
+  actual.emplace_back(absl::make_unique<int>(5));
+  auto subrange = absl::MakeSpan(actual.data(), 3);
+  absl::c_move_backward(subrange, actual.end());
+  EXPECT_THAT(actual, ElementsAre(IsNull(), IsNull(), Pointee(1), Pointee(2),
+                                  Pointee(3)));
+}
+
+TEST(MutatingTest, MoveWithRvalue) {
+  auto MakeRValueSrc = [] {
+    std::vector<std::unique_ptr<int>> src;
+    src.emplace_back(absl::make_unique<int>(1));
+    src.emplace_back(absl::make_unique<int>(2));
+    src.emplace_back(absl::make_unique<int>(3));
+    return src;
+  };
+
+  std::vector<std::unique_ptr<int>> dest = MakeRValueSrc();
+  absl::c_move(MakeRValueSrc(), std::back_inserter(dest));
+  EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(1),
+                                Pointee(2), Pointee(3)));
+}
+
+TEST(MutatingTest, SwapRanges) {
+  std::vector<int> odds = {2, 4, 6};
+  std::vector<int> evens = {1, 3, 5};
+  absl::c_swap_ranges(odds, evens);
+  EXPECT_THAT(odds, ElementsAre(1, 3, 5));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+
+  odds.pop_back();
+  absl::c_swap_ranges(odds, evens);
+  EXPECT_THAT(odds, ElementsAre(2, 4));
+  EXPECT_THAT(evens, ElementsAre(1, 3, 6));
+
+  absl::c_swap_ranges(evens, odds);
+  EXPECT_THAT(odds, ElementsAre(1, 3));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+}
+
+TEST_F(NonMutatingTest, Transform) {
+  std::vector<int> x{0, 2, 4}, y, z;
+  auto end = absl::c_transform(x, back_inserter(y), std::negate<int>());
+  EXPECT_EQ(std::vector<int>({0, -2, -4}), y);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({0, -2, -4, 7}), y);
+
+  y = {1, 3, 0};
+  end = absl::c_transform(x, y, back_inserter(z), std::plus<int>());
+  EXPECT_EQ(std::vector<int>({1, 5, 4}), z);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({1, 5, 4, 7}), z);
+
+  z.clear();
+  y.pop_back();
+  end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
+  EXPECT_EQ(std::vector<int>({1, 5}), z);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
+
+  z.clear();
+  std::swap(x, y);
+  end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
+  EXPECT_EQ(std::vector<int>({1, 5}), z);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
+}
+
+TEST(MutatingTest, Replace) {
+  const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
+  const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
+
+  std::vector<int> test_vector = initial;
+  absl::c_replace(test_vector, 1, 4);
+  EXPECT_EQ(expected, test_vector);
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_replace(test_list, 1, 4);
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+}
+
+TEST(MutatingTest, ReplaceIf) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {0, 2, 0, 4, 0};
+
+  absl::c_replace_if(actual, IsOdd, 0);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, ReplaceCopy) {
+  const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
+  const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
+
+  std::vector<int> actual;
+  absl::c_replace_copy(initial, back_inserter(actual), 1, 4);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Sort) {
+  std::vector<int> test_vector = {2, 3, 1, 4};
+  absl::c_sort(test_vector);
+  EXPECT_THAT(test_vector, ElementsAre(1, 2, 3, 4));
+}
+
+TEST(MutatingTest, SortWithPredicate) {
+  std::vector<int> test_vector = {2, 3, 1, 4};
+  absl::c_sort(test_vector, std::greater<int>());
+  EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
+}
+
+// For absl::c_stable_sort tests. Needs an operator< that does not cover all
+// fields so that the test can check the sort preserves order of equal
+// elements.
+struct Element {
+  int key;
+  int value;
+  friend bool operator<(const Element& e1, const Element& e2) {
+    return e1.key < e2.key;
+  }
+  // Make gmock print useful diagnostics.
+  friend std::ostream& operator<<(std::ostream& o, const Element& e) {
+    return o << "{" << e.key << ", " << e.value << "}";
+  }
+};
+
+MATCHER_P2(IsElement, key, value, "") {
+  return arg.key == key && arg.value == value;
+}
+
+TEST(MutatingTest, StableSort) {
+  std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
+  absl::c_stable_sort(test_vector);
+  EXPECT_THAT(test_vector,
+              ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1),
+                          IsElement(2, 0), IsElement(2, 2)));
+}
+
+TEST(MutatingTest, StableSortWithPredicate) {
+  std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
+  absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) {
+    return e2 < e1;
+  });
+  EXPECT_THAT(test_vector,
+              ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2),
+                          IsElement(1, 1), IsElement(1, 0)));
+}
+
+TEST(MutatingTest, ReplaceCopyIf) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {0, 2, 0, 4, 0};
+
+  std::vector<int> actual;
+  absl::c_replace_copy_if(initial, back_inserter(actual), IsOdd, 0);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Fill) {
+  std::vector<int> actual(5);
+  absl::c_fill(actual, 1);
+  EXPECT_THAT(actual, ElementsAre(1, 1, 1, 1, 1));
+}
+
+TEST(MutatingTest, FillN) {
+  std::vector<int> actual(5, 0);
+  absl::c_fill_n(actual, 2, 1);
+  EXPECT_THAT(actual, ElementsAre(1, 1, 0, 0, 0));
+}
+
+TEST(MutatingTest, Generate) {
+  std::vector<int> actual(5);
+  int x = 0;
+  absl::c_generate(actual, [&x]() { return ++x; });
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, GenerateN) {
+  std::vector<int> actual(5, 0);
+  int x = 0;
+  absl::c_generate_n(actual, 3, [&x]() { return ++x; });
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 0, 0));
+}
+
+TEST(MutatingTest, RemoveCopy) {
+  std::vector<int> actual;
+  absl::c_remove_copy(std::vector<int>{1, 2, 3}, back_inserter(actual), 2);
+  EXPECT_THAT(actual, ElementsAre(1, 3));
+}
+
+TEST(MutatingTest, RemoveCopyIf) {
+  std::vector<int> actual;
+  absl::c_remove_copy_if(std::vector<int>{1, 2, 3}, back_inserter(actual),
+                         IsOdd);
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST(MutatingTest, UniqueCopy) {
+  std::vector<int> actual;
+  absl::c_unique_copy(std::vector<int>{1, 2, 2, 2, 3, 3, 2},
+                      back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 2));
+}
+
+TEST(MutatingTest, UniqueCopyWithPredicate) {
+  std::vector<int> actual;
+  absl::c_unique_copy(std::vector<int>{1, 2, 3, -1, -2, -3, 1},
+                      back_inserter(actual),
+                      [](int x, int y) { return (x < 0) == (y < 0); });
+  EXPECT_THAT(actual, ElementsAre(1, -1, 1));
+}
+
+TEST(MutatingTest, Reverse) {
+  std::vector<int> test_vector = {1, 2, 3, 4};
+  absl::c_reverse(test_vector);
+  EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
+
+  std::list<int> test_list = {1, 2, 3, 4};
+  absl::c_reverse(test_list);
+  EXPECT_THAT(test_list, ElementsAre(4, 3, 2, 1));
+}
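The Element fixture above exists because stability is only observable when keys tie; the same property in a self-contained hedged sketch (our pairs, not the test's data):

```cpp
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"

int main() {
  // Two entries tie on .first; c_stable_sort keeps 'a' before 'c'.
  std::vector<std::pair<int, char>> v = {{2, 'a'}, {1, 'b'}, {2, 'c'}};
  absl::c_stable_sort(
      v, [](const auto& x, const auto& y) { return x.first < y.first; });
  return (v[1].second == 'a' && v[2].second == 'c') ? 0 : 1;
}
```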
+
+TEST(MutatingTest, ReverseCopy) {
+  std::vector<int> actual;
+  absl::c_reverse_copy(std::vector<int>{1, 2, 3, 4}, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(4, 3, 2, 1));
+}
+
+TEST(MutatingTest, Rotate) {
+  std::vector<int> actual = {1, 2, 3, 4};
+  auto it = absl::c_rotate(actual, actual.begin() + 2);
+  EXPECT_THAT(actual, testing::ElementsAreArray({3, 4, 1, 2}));
+  EXPECT_EQ(*it, 1);
+}
+
+TEST(MutatingTest, RotateCopy) {
+  std::vector<int> initial = {1, 2, 3, 4};
+  std::vector<int> actual;
+  auto end =
+      absl::c_rotate_copy(initial, initial.begin() + 2, back_inserter(actual));
+  *end = 5;
+  EXPECT_THAT(actual, ElementsAre(3, 4, 1, 2, 5));
+}
+
+template <typename T>
+T RandomlySeededPrng() {
+  std::random_device rdev;
+  std::seed_seq::result_type data[T::state_size];
+  std::generate_n(data, T::state_size, std::ref(rdev));
+  std::seed_seq prng_seed(data, data + T::state_size);
+  return T(prng_seed);
+}
+
+TEST(MutatingTest, Shuffle) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_shuffle(actual, RandomlySeededPrng<std::mt19937_64>());
+  EXPECT_THAT(actual, UnorderedElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, Sample) {
+  std::vector<int> actual;
+  absl::c_sample(std::vector<int>{1, 2, 3, 4, 5}, std::back_inserter(actual),
+                 3, RandomlySeededPrng<std::mt19937_64>());
+  EXPECT_THAT(actual, IsSubsetOf({1, 2, 3, 4, 5}));
+  EXPECT_THAT(actual, SizeIs(3));
+}
+
+TEST(MutatingTest, PartialSort) {
+  std::vector<int> sequence{5, 3, 42, 0};
+  absl::c_partial_sort(sequence, sequence.begin() + 2);
+  EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(0, 3));
+  absl::c_partial_sort(sequence, sequence.begin() + 2, std::greater<int>());
+  EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(42, 5));
+}
+
+TEST(MutatingTest, PartialSortCopy) {
+  const std::vector<int> initial = {5, 3, 42, 0};
+  std::vector<int> actual(2);
+  absl::c_partial_sort_copy(initial, actual);
+  EXPECT_THAT(actual, ElementsAre(0, 3));
+  absl::c_partial_sort_copy(initial, actual, std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(42, 5));
+}
+
+TEST(MutatingTest, Merge) {
+  std::vector<int> actual;
+  absl::c_merge(std::vector<int>{1, 3, 5}, std::vector<int>{2, 4},
+                back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, MergeWithComparator) {
+  std::vector<int> actual;
+  absl::c_merge(std::vector<int>{5, 3, 1}, std::vector<int>{4, 2},
+                back_inserter(actual), std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
+}
+
+TEST(MutatingTest, InplaceMerge) {
+  std::vector<int> actual = {1, 3, 5, 2, 4};
+  absl::c_inplace_merge(actual, actual.begin() + 3);
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, InplaceMergeWithComparator) {
+  std::vector<int> actual = {5, 3, 1, 4, 2};
+  absl::c_inplace_merge(actual, actual.begin() + 3, std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
+}
+
+class SetOperationsTest : public testing::Test {
+ protected:
+  std::vector<int> a_ = {1, 2, 3};
+  std::vector<int> b_ = {1, 3, 5};
+
+  std::vector<int> a_reversed_ = {3, 2, 1};
+  std::vector<int> b_reversed_ = {5, 3, 1};
+};
+
+TEST_F(SetOperationsTest, SetUnion) {
+  std::vector<int> actual;
+  absl::c_set_union(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 5));
+}
+
+TEST_F(SetOperationsTest, SetUnionWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_union(a_reversed_, b_reversed_, back_inserter(actual),
+                    std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 3, 2, 1));
+}
+
+TEST_F(SetOperationsTest, SetIntersection) {
+  std::vector<int> actual;
+  absl::c_set_intersection(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 3));
+}
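The set-operation tests here and just below depend on both inputs being sorted under the comparator in use. A hedged sketch with the default ascending order (values are ours):

```cpp
#include <iterator>
#include <vector>

#include "absl/algorithm/container.h"

int main() {
  // Both inputs must already be sorted in the comparison order.
  std::vector<int> a = {1, 2, 3};
  std::vector<int> b = {2, 3, 4};
  std::vector<int> only_a, either;
  absl::c_set_difference(a, b, std::back_inserter(only_a));  // {1}
  absl::c_set_union(a, b, std::back_inserter(either));       // {1, 2, 3, 4}
  return (only_a.size() == 1 && either.size() == 4) ? 0 : 1;
}
```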
+
+TEST_F(SetOperationsTest, SetIntersectionWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_intersection(a_reversed_, b_reversed_, back_inserter(actual),
+                           std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(3, 1));
+}
+
+TEST_F(SetOperationsTest, SetDifference) {
+  std::vector<int> actual;
+  absl::c_set_difference(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST_F(SetOperationsTest, SetDifferenceWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_difference(a_reversed_, b_reversed_, back_inserter(actual),
+                         std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST_F(SetOperationsTest, SetSymmetricDifference) {
+  std::vector<int> actual;
+  absl::c_set_symmetric_difference(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(2, 5));
+}
+
+TEST_F(SetOperationsTest, SetSymmetricDifferenceWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_symmetric_difference(a_reversed_, b_reversed_,
+                                   back_inserter(actual), std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 2));
+}
+
+TEST(HeapOperationsTest, WithoutComparator) {
+  std::vector<int> heap = {1, 2, 3};
+  EXPECT_FALSE(absl::c_is_heap(heap));
+  absl::c_make_heap(heap);
+  EXPECT_TRUE(absl::c_is_heap(heap));
+  heap.push_back(4);
+  EXPECT_EQ(3, absl::c_is_heap_until(heap) - heap.begin());
+  absl::c_push_heap(heap);
+  EXPECT_EQ(4, heap[0]);
+  absl::c_pop_heap(heap);
+  EXPECT_EQ(4, heap[3]);
+  absl::c_make_heap(heap);
+  absl::c_sort_heap(heap);
+  EXPECT_THAT(heap, ElementsAre(1, 2, 3, 4));
+  EXPECT_FALSE(absl::c_is_heap(heap));
+}
+
+TEST(HeapOperationsTest, WithComparator) {
+  using greater = std::greater<int>;
+  std::vector<int> heap = {3, 2, 1};
+  EXPECT_FALSE(absl::c_is_heap(heap, greater()));
+  absl::c_make_heap(heap, greater());
+  EXPECT_TRUE(absl::c_is_heap(heap, greater()));
+  heap.push_back(0);
+  EXPECT_EQ(3, absl::c_is_heap_until(heap, greater()) - heap.begin());
+  absl::c_push_heap(heap, greater());
+  EXPECT_EQ(0, heap[0]);
+  absl::c_pop_heap(heap, greater());
+  EXPECT_EQ(0, heap[3]);
+  absl::c_make_heap(heap, greater());
+  absl::c_sort_heap(heap, greater());
+  EXPECT_THAT(heap, ElementsAre(3, 2, 1, 0));
+  EXPECT_FALSE(absl::c_is_heap(heap, greater()));
+}
+
+TEST(MutatingTest, PermutationOperations) {
+  std::vector<int> initial = {1, 2, 3, 4};
+  std::vector<int> permuted = initial;
+
+  absl::c_next_permutation(permuted);
+  EXPECT_TRUE(absl::c_is_permutation(initial, permuted));
+  EXPECT_TRUE(absl::c_is_permutation(initial, permuted, std::equal_to<int>()));
+
+  std::vector<int> permuted2 = initial;
+  absl::c_prev_permutation(permuted2, std::greater<int>());
+  EXPECT_EQ(permuted, permuted2);
+
+  absl::c_prev_permutation(permuted);
+  EXPECT_EQ(initial, permuted);
+}
+
+}  // namespace
diff --git a/third_party/absl/absl/base/BUILD.bazel b/third_party/absl/absl/base/BUILD.bazel
new file mode 100644
index 000000000..0eb735dac
--- /dev/null
+++ b/third_party/absl/absl/base/BUILD.bazel
@@ -0,0 +1,880 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) + +licenses(["notice"]) + +cc_library( + name = "atomic_hook", + hdrs = ["internal/atomic_hook.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ], +) + +cc_library( + name = "errno_saver", + hdrs = ["internal/errno_saver.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [":config"], +) + +cc_library( + name = "log_severity", + srcs = ["log_severity.cc"], + hdrs = ["log_severity.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":core_headers", + ], +) + +cc_library( + name = "no_destructor", + hdrs = ["no_destructor.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [":config"], +) + +cc_library( + name = "nullability", + srcs = ["internal/nullability_impl.h"], + hdrs = ["nullability.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":core_headers", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "raw_logging_internal", + srcs = ["internal/raw_logging.cc"], + hdrs = ["internal/raw_logging.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":atomic_hook", + ":config", + ":core_headers", + ":errno_saver", + ":log_severity", + ], +) + +cc_library( + name = "spinlock_wait", + srcs = [ + "internal/spinlock_akaros.inc", + "internal/spinlock_linux.inc", + "internal/spinlock_posix.inc", + "internal/spinlock_wait.cc", + "internal/spinlock_win32.inc", + ], + hdrs = ["internal/spinlock_wait.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl/base:__pkg__", + ], + deps = [ + ":base_internal", + ":core_headers", + ":errno_saver", + ], +) + +cc_library( + name = "config", + hdrs = [ + "config.h", + "options.h", + "policy_checks.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, +) + +cc_library( + name = "cycleclock_internal", + hdrs = [ + "internal/cycleclock_config.h", + "internal/unscaledcycleclock_config.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":base_internal", + ":config", + ], +) + +cc_library( + name = "dynamic_annotations", + srcs = [ + "internal/dynamic_annotations.h", + ], + hdrs = [ + "dynamic_annotations.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":core_headers", + ], +) + +cc_library( + name = "core_headers", + hdrs = [ + "attributes.h", + "const_init.h", + "macros.h", + "optimization.h", + "port.h", + "thread_annotations.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ], +) + +cc_library( + name = "malloc_internal", + srcs = [ + "internal/low_level_alloc.cc", + ], + hdrs = [ + "internal/direct_mmap.h", + "internal/low_level_alloc.h", + ], + copts = ABSL_DEFAULT_COPTS + select({ + "//conditions:default": [], + }), + linkopts = select({ + "//absl:msvc_compiler": [], + "//absl:clang-cl_compiler": [], + "//absl:wasm": [], + "//conditions:default": ["-pthread"], + 
}) + ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//visibility:public", + ], + deps = [ + ":base", + ":base_internal", + ":config", + ":core_headers", + ":dynamic_annotations", + ":raw_logging_internal", + ], +) + +cc_library( + name = "base_internal", + hdrs = [ + "internal/hide_ptr.h", + "internal/identity.h", + "internal/inline_variable.h", + "internal/invoke.h", + "internal/scheduling_mode.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "base", + srcs = [ + "internal/cycleclock.cc", + "internal/spinlock.cc", + "internal/sysinfo.cc", + "internal/thread_identity.cc", + "internal/unscaledcycleclock.cc", + ], + hdrs = [ + "call_once.h", + "casts.h", + "internal/cycleclock.h", + "internal/low_level_scheduling.h", + "internal/per_thread_tls.h", + "internal/spinlock.h", + "internal/sysinfo.h", + "internal/thread_identity.h", + "internal/tsan_mutex_interface.h", + "internal/unscaledcycleclock.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = select({ + "//absl:msvc_compiler": [ + "-DEFAULTLIB:advapi32.lib", + ], + "//absl:clang-cl_compiler": [ + "-DEFAULTLIB:advapi32.lib", + ], + "//absl:mingw_compiler": [ + "-DEFAULTLIB:advapi32.lib", + "-ladvapi32", + ], + "//absl:wasm": [], + "//conditions:default": ["-pthread"], + }) + ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":base_internal", + ":config", + ":core_headers", + ":cycleclock_internal", + ":dynamic_annotations", + ":log_severity", + ":nullability", + ":raw_logging_internal", + ":spinlock_wait", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "atomic_hook_test_helper", + testonly = 1, + srcs = ["internal/atomic_hook_test_helper.cc"], + hdrs = ["internal/atomic_hook_test_helper.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":core_headers", + ], +) + +cc_test( + name = "atomic_hook_test", + size = "small", + srcs = ["internal/atomic_hook_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":atomic_hook", + ":atomic_hook_test_helper", + ":core_headers", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "bit_cast_test", + size = "small", + srcs = [ + "bit_cast_test.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "throw_delegate", + srcs = ["internal/throw_delegate.cc"], + hdrs = ["internal/throw_delegate.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":raw_logging_internal", + ], +) + +cc_test( + name = "throw_delegate_test", + srcs = ["throw_delegate_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":throw_delegate", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "errno_saver_test", + size = "small", + srcs = ["internal/errno_saver_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":errno_saver", + ":strerror", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "exception_testing", + testonly = 1, + hdrs = ["internal/exception_testing.h"], + copts 
= ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "pretty_function", + hdrs = ["internal/pretty_function.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], +) + +cc_library( + name = "exception_safety_testing", + testonly = 1, + srcs = ["internal/exception_safety_testing.cc"], + hdrs = ["internal/exception_safety_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":pretty_function", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/utility", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "exception_safety_testing_test", + srcs = ["exception_safety_testing_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":exception_safety_testing", + "//absl/memory", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "inline_variable_test", + size = "small", + srcs = [ + "inline_variable_test.cc", + "inline_variable_test_a.cc", + "inline_variable_test_b.cc", + "internal/inline_variable_testing.h", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base_internal", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "invoke_test", + size = "small", + srcs = ["invoke_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base_internal", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +# Common test library made available for use in non-absl code that overrides +# AbslInternalSpinLockDelay and AbslInternalSpinLockWake. 
+cc_library( + name = "spinlock_test_common", + testonly = 1, + srcs = ["spinlock_test_common.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":base_internal", + ":config", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest", + ], + alwayslink = 1, +) + +cc_test( + name = "spinlock_test", + size = "medium", + srcs = ["spinlock_test_common.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = [ + "no_test_wasm", + ], + deps = [ + ":base", + ":base_internal", + ":config", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "spinlock_benchmark_common", + testonly = 1, + srcs = ["internal/spinlock_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl/base:__pkg__", + ], + deps = [ + ":base", + ":base_internal", + ":no_destructor", + ":raw_logging_internal", + "//absl/synchronization", + "@com_github_google_benchmark//:benchmark_main", + ], + alwayslink = 1, +) + +cc_binary( + name = "spinlock_benchmark", + testonly = 1, + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":spinlock_benchmark_common", + ], +) + +cc_library( + name = "endian", + hdrs = [ + "internal/endian.h", + "internal/unaligned_access.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":config", + ":core_headers", + ":nullability", + ], +) + +cc_test( + name = "endian_test", + srcs = ["internal/endian_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":config", + ":endian", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "config_test", + srcs = ["config_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + "//absl/synchronization:thread_pool", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "call_once_test", + srcs = ["call_once_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "no_destructor_test", + srcs = ["no_destructor_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":no_destructor", + ":raw_logging_internal", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "no_destructor_benchmark", + testonly = 1, + srcs = ["no_destructor_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":no_destructor", + ":raw_logging_internal", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_test( + name = "nullability_test", + srcs = ["nullability_test.cc"], + deps = [ + ":core_headers", + ":nullability", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "raw_logging_test", + srcs = ["raw_logging_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":raw_logging_internal", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + 
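For context on what the `call_once_test` target just declared exercises: a hedged sketch of the `absl::call_once` API from `call_once.h` (the `Init`/`EnsureInit` names are ours, not part of the patch):

```cpp
#include "absl/base/call_once.h"

absl::once_flag once;

void Init() { /* one-time setup */ }

// Init() runs exactly once even if EnsureInit() races on many threads;
// later callers block until the first call completes.
void EnsureInit() { absl::call_once(once, Init); }
```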
+cc_test( + name = "sysinfo_test", + size = "small", + srcs = ["internal/sysinfo_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":base", + "//absl/synchronization", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "low_level_alloc_test", + size = "medium", + srcs = ["internal/low_level_alloc_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = [ + "no_test_ios_x86_64", + "no_test_wasm", + ], + deps = [ + ":malloc_internal", + "//absl/container:node_hash_map", + ], +) + +cc_test( + name = "thread_identity_test", + size = "small", + srcs = ["internal/thread_identity_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = [ + "no_test_wasm", + ], + deps = [ + ":base", + ":core_headers", + "//absl/synchronization", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "thread_identity_benchmark", + srcs = ["internal/thread_identity_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":base", + "//absl/synchronization", + "@com_github_google_benchmark//:benchmark_main", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "scoped_set_env", + testonly = 1, + srcs = ["internal/scoped_set_env.cc"], + hdrs = ["internal/scoped_set_env.h"], + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":raw_logging_internal", + ], +) + +cc_test( + name = "scoped_set_env_test", + size = "small", + srcs = ["internal/scoped_set_env_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":scoped_set_env", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "log_severity_test", + size = "small", + srcs = ["log_severity_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":log_severity", + "//absl/flags:flag_internal", + "//absl/flags:marshalling", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "strerror", + srcs = ["internal/strerror.cc"], + hdrs = ["internal/strerror.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ":core_headers", + ":errno_saver", + ], +) + +cc_test( + name = "strerror_test", + size = "small", + srcs = ["internal/strerror_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":strerror", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "strerror_benchmark", + testonly = 1, + srcs = ["internal/strerror_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":strerror", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "fast_type_id", + hdrs = ["internal/fast_type_id.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":config", + ], +) + +cc_test( + name = "fast_type_id_test", + size = "small", + srcs = ["internal/fast_type_id_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, 
+ deps = [ + ":fast_type_id", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "prefetch", + hdrs = [ + "prefetch.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":config", + ":core_headers", + ], +) + +cc_test( + name = "prefetch_test", + size = "small", + srcs = [ + "prefetch_test.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":prefetch", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "unique_small_name_test", + size = "small", + srcs = ["internal/unique_small_name_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + linkstatic = 1, + deps = [ + ":core_headers", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "optimization_test", + size = "small", + srcs = ["optimization_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":core_headers", + "//absl/types:optional", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/third_party/absl/absl/base/CMakeLists.txt b/third_party/absl/absl/base/CMakeLists.txt new file mode 100644 index 000000000..4cfc22858 --- /dev/null +++ b/third_party/absl/absl/base/CMakeLists.txt @@ -0,0 +1,739 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +find_library(LIBRT rt) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + atomic_hook + HDRS + "internal/atomic_hook.h" + DEPS + absl::config + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + errno_saver + HDRS + "internal/errno_saver.h" + DEPS + absl::config + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + log_severity + HDRS + "log_severity.h" + SRCS + "log_severity.cc" + DEPS + absl::config + absl::core_headers + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + no_destructor + HDRS + "no_destructor.h" + DEPS + absl::config + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_library( + NAME + nullability + HDRS + "nullability.h" + SRCS + "internal/nullability_impl.h" + DEPS + absl::core_headers + absl::type_traits + COPTS + ${ABSL_DEFAULT_COPTS} +) + +absl_cc_test( + NAME + nullability_test + SRCS + "nullability_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::core_headers + absl::nullability + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_logging_internal + HDRS + "internal/raw_logging.h" + SRCS + "internal/raw_logging.cc" + DEPS + absl::atomic_hook + absl::config + absl::core_headers + absl::errno_saver + absl::log_severity + COPTS + ${ABSL_DEFAULT_COPTS} +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library(
+  NAME
+    spinlock_wait
+  HDRS
+    "internal/spinlock_wait.h"
+  SRCS
+    "internal/spinlock_akaros.inc"
+    "internal/spinlock_linux.inc"
+    "internal/spinlock_posix.inc"
+    "internal/spinlock_wait.cc"
+    "internal/spinlock_win32.inc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base_internal
+    absl::core_headers
+    absl::errno_saver
+)
+
+absl_cc_library(
+  NAME
+    config
+  HDRS
+    "config.h"
+    "options.h"
+    "policy_checks.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    dynamic_annotations
+  HDRS
+    "dynamic_annotations.h"
+  SRCS
+    "internal/dynamic_annotations.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    core_headers
+  HDRS
+    "attributes.h"
+    "const_init.h"
+    "macros.h"
+    "optimization.h"
+    "port.h"
+    "thread_annotations.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    malloc_internal
+  HDRS
+    "internal/direct_mmap.h"
+    "internal/low_level_alloc.h"
+  SRCS
+    "internal/low_level_alloc.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::dynamic_annotations
+    absl::raw_logging_internal
+    Threads::Threads
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    base_internal
+  HDRS
+    "internal/hide_ptr.h"
+    "internal/identity.h"
+    "internal/inline_variable.h"
+    "internal/invoke.h"
+    "internal/scheduling_mode.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::type_traits
+)
+
+absl_cc_library(
+  NAME
+    base
+  HDRS
+    "call_once.h"
+    "casts.h"
+    "internal/cycleclock.h"
+    "internal/cycleclock_config.h"
+    "internal/low_level_scheduling.h"
+    "internal/per_thread_tls.h"
+    "internal/spinlock.h"
+    "internal/sysinfo.h"
+    "internal/thread_identity.h"
+    "internal/tsan_mutex_interface.h"
+    "internal/unscaledcycleclock.h"
+    "internal/unscaledcycleclock_config.h"
+  SRCS
+    "internal/cycleclock.cc"
+    "internal/spinlock.cc"
+    "internal/sysinfo.cc"
+    "internal/thread_identity.cc"
+    "internal/unscaledcycleclock.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+    $<$<BOOL:${LIBRT}>:-lrt>
+    $<$<BOOL:${MINGW}>:-ladvapi32>
+  DEPS
+    absl::atomic_hook
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::dynamic_annotations
+    absl::log_severity
+    absl::nullability
+    absl::raw_logging_internal
+    absl::spinlock_wait
+    absl::type_traits
+    Threads::Threads
+  PUBLIC
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    throw_delegate
+  HDRS
+    "internal/throw_delegate.h"
+  SRCS
+    "internal/throw_delegate.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::raw_logging_internal
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    exception_testing
+  HDRS
+    "internal/exception_testing.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    GTest::gtest
+  TESTONLY
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    pretty_function
+  HDRS
+    "internal/pretty_function.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library( + NAME + exception_safety_testing + HDRS + "internal/exception_safety_testing.h" + SRCS + "internal/exception_safety_testing.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::pretty_function + absl::memory + absl::meta + absl::strings + absl::utility + GTest::gtest + TESTONLY +) + +absl_cc_test( + NAME + absl_exception_safety_testing_test + SRCS + "exception_safety_testing_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::exception_safety_testing + absl::memory + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + atomic_hook_test_helper + SRCS + "internal/atomic_hook_test_helper.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::atomic_hook + absl::core_headers + TESTONLY +) + +absl_cc_test( + NAME + atomic_hook_test + SRCS + "internal/atomic_hook_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::atomic_hook_test_helper + absl::atomic_hook + absl::core_headers + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + bit_cast_test + SRCS + "bit_cast_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + GTest::gtest_main +) + +absl_cc_test( + NAME + errno_saver_test + SRCS + "internal/errno_saver_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::errno_saver + absl::strerror + GTest::gmock + GTest::gtest_main +) + +absl_cc_test( + NAME + throw_delegate_test + SRCS + "throw_delegate_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::throw_delegate + GTest::gtest_main +) + +absl_cc_test( + NAME + inline_variable_test + SRCS + "internal/inline_variable_testing.h" + "inline_variable_test.cc" + "inline_variable_test_a.cc" + "inline_variable_test_b.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base_internal + GTest::gtest_main +) + +absl_cc_test( + NAME + invoke_test + SRCS + "invoke_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base_internal + absl::memory + absl::strings + GTest::gmock + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + spinlock_test_common + SRCS + "spinlock_test_common.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::base_internal + absl::core_headers + absl::synchronization + GTest::gtest + TESTONLY +) + +# On bazel BUILD this target use "alwayslink = 1" which is not implemented here +absl_cc_test( + NAME + spinlock_test + SRCS + "spinlock_test_common.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::base_internal + absl::config + absl::core_headers + absl::synchronization + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + endian + HDRS + "internal/endian.h" + "internal/unaligned_access.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::core_headers + absl::nullability + PUBLIC +) + +absl_cc_test( + NAME + endian_test + SRCS + "internal/endian_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::endian + GTest::gtest_main +) + +absl_cc_test( + NAME + config_test + SRCS + "config_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::synchronization + GTest::gtest_main +) + +absl_cc_test( + NAME + call_once_test + SRCS + "call_once_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + absl::synchronization + GTest::gtest_main +) + +absl_cc_test( + NAME + no_destructor_test + SRCS + "no_destructor_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::no_destructor + absl::config + absl::raw_logging_internal + GTest::gtest_main +) + +absl_cc_test( + NAME + raw_logging_test + SRCS + "raw_logging_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::raw_logging_internal + absl::strings + GTest::gtest_main +) + +absl_cc_test( + NAME + sysinfo_test + SRCS + "internal/sysinfo_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::synchronization + GTest::gtest_main +) + +absl_cc_test( + NAME + low_level_alloc_test + SRCS + "internal/low_level_alloc_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::malloc_internal + absl::node_hash_map + Threads::Threads +) + +absl_cc_test( + NAME + thread_identity_test + SRCS + "internal/thread_identity_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::core_headers + absl::synchronization + Threads::Threads + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + scoped_set_env + SRCS + "internal/scoped_set_env.cc" + HDRS + "internal/scoped_set_env.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::raw_logging_internal +) + +absl_cc_test( + NAME + scoped_set_env_test + SRCS + "internal/scoped_set_env_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::scoped_set_env + GTest::gtest_main +) + +absl_cc_test( + NAME + cmake_thread_test + SRCS + "internal/cmake_thread_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base +) + +absl_cc_test( + NAME + log_severity_test + SRCS + "log_severity_test.cc" + DEPS + absl::flags_internal + absl::flags_marshalling + absl::log_severity + absl::strings + GTest::gmock + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + strerror + SRCS + "internal/strerror.cc" + HDRS + "internal/strerror.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers + absl::errno_saver +) + +absl_cc_test( + NAME + strerror_test + SRCS + "internal/strerror_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::strerror + absl::strings + GTest::gmock + GTest::gtest_main +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + fast_type_id + HDRS + "internal/fast_type_id.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config +) + +absl_cc_test( + NAME + fast_type_id_test + SRCS + "internal/fast_type_id_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::fast_type_id + GTest::gtest_main +) + +absl_cc_library( + NAME + prefetch + HDRS + "prefetch.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::config + absl::core_headers +) + +absl_cc_test( + NAME + prefetch_test + SRCS + "prefetch_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::prefetch + GTest::gtest_main +) + +absl_cc_test( + NAME + optimization_test + SRCS + "optimization_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::core_headers + absl::optional + GTest::gtest_main +) diff --git a/third_party/absl/absl/base/attributes.h b/third_party/absl/absl/base/attributes.h new file mode 100644 index 000000000..d4f67a129 --- /dev/null +++ b/third_party/absl/absl/base/attributes.h @@ -0,0 +1,874 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This header file defines macros for declaring attributes for functions, +// types, and variables. +// +// These macros are used within Abseil and allow the compiler to optimize, where +// applicable, certain function calls. +// +// Most macros here are exposing GCC or Clang features, and are stubbed out for +// other compilers. +// +// GCC attributes documentation: +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html +// +// Most attributes in this file are already supported by GCC 4.7. However, some +// of them are not supported in older version of Clang. Thus, we check +// `__has_attribute()` first. If the check fails, we check if we are on GCC and +// assume the attribute exists on GCC (which is verified on GCC 4.7). + +#ifndef ABSL_BASE_ATTRIBUTES_H_ +#define ABSL_BASE_ATTRIBUTES_H_ + +#include "absl/base/config.h" + +// ABSL_HAVE_ATTRIBUTE +// +// A function-like feature checking macro that is a wrapper around +// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a +// nonzero constant integer if the attribute is supported or 0 if not. +// +// It evaluates to zero if `__has_attribute` is not defined by the compiler. +// +// GCC: https://gcc.gnu.org/gcc-5/changes.html +// Clang: https://clang.llvm.org/docs/LanguageExtensions.html +#ifdef __has_attribute +#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x) +#else +#define ABSL_HAVE_ATTRIBUTE(x) 0 +#endif + +// ABSL_HAVE_CPP_ATTRIBUTE +// +// A function-like feature checking macro that accepts C++11 style attributes. +// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 +// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't +// find `__has_cpp_attribute`, will evaluate to 0. 
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+// NOTE: requiring __cplusplus above should not be necessary, but
+// works around https://bugs.llvm.org/show_bug.cgi?id=23435.
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0
+#endif
+
+// -----------------------------------------------------------------------------
+// Function Attributes
+// -----------------------------------------------------------------------------
+//
+// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+// Clang: https://clang.llvm.org/docs/AttributeReference.html
+
+// ABSL_PRINTF_ATTRIBUTE
+// ABSL_SCANF_ATTRIBUTE
+//
+// Tells the compiler to perform `printf` format string checking if the
+// compiler supports it; see the 'format' attribute in
+// <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html>.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__printf__, string_index, first_to_check)))
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__scanf__, string_index, first_to_check)))
+#else
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
+
+// ABSL_ATTRIBUTE_ALWAYS_INLINE
+// ABSL_ATTRIBUTE_NOINLINE
+//
+// Forces functions to either inline or not inline. Introduced in gcc 3.1.
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1
+#else
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1
+#else
+#define ABSL_ATTRIBUTE_NOINLINE
+#endif
+
+// ABSL_ATTRIBUTE_NO_TAIL_CALL
+//
+// Prevents the compiler from optimizing away stack frames for functions which
+// end in a call to another function.
+#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
+#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL \
+  __attribute__((optimize("no-optimize-sibling-calls")))
+#else
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0
+#endif
+
+// ABSL_ATTRIBUTE_WEAK
+//
+// Tags a function as weak for the purposes of compilation and linking.
+// Weak attributes did not work properly in LLVM's Windows backend before
+// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
+// for further information.
+// The MinGW compiler doesn't complain about the weak attribute until the link
+// step, presumably because Windows doesn't use ELF binaries.
+
+// ABSL_ATTRIBUTE_WEAK
+//
+// Tags a function as weak for the purposes of compilation and linking.
+// Weak attributes did not work properly in LLVM's Windows backend before
+// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
+// for further information.
+// The MinGW compiler doesn't complain about the weak attribute until the link
+// step, presumably because Windows doesn't use ELF binaries.
+#if (ABSL_HAVE_ATTRIBUTE(weak) ||                                         \
+     (defined(__GNUC__) && !defined(__clang__))) &&                       \
+    (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
+    !defined(__MINGW32__)
+#undef ABSL_ATTRIBUTE_WEAK
+#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
+#define ABSL_HAVE_ATTRIBUTE_WEAK 1
+#else
+#define ABSL_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_WEAK 0
+#endif
+
+// ABSL_ATTRIBUTE_NONNULL
+//
+// Tells the compiler either (a) that a particular function parameter
+// should be a non-null pointer, or (b) that all pointer arguments should
+// be non-null.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+//
+// Args are indexed starting at 1.
+//
+// For non-static class member functions, the implicit `this` argument
+// is arg 1, and the first explicit argument is arg 2. For static class member
+// functions, there is no implicit `this`, and the first explicit argument is
+// arg 1.
+//
+// Example:
+//
+//   /* arg_a cannot be null, but arg_b can */
+//   void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1);
+//
+//   class C {
+//     /* arg_a cannot be null, but arg_b can */
+//     void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2);
+//
+//     /* arg_a cannot be null, but arg_b can */
+//     static void StaticMethod(void* arg_a, void* arg_b)
+//     ABSL_ATTRIBUTE_NONNULL(1);
+//   };
+//
+// If no arguments are provided, then all pointer arguments should be non-null.
+//
+//   /* No pointer arguments may be null. */
+//   void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL();
+//
+// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but
+// ABSL_ATTRIBUTE_NONNULL does not.
+#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index)))
+#else
+#define ABSL_ATTRIBUTE_NONNULL(...)
+#endif
+
+// ABSL_ATTRIBUTE_NORETURN
+//
+// Tells the compiler that a given function never returns.
+#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define ABSL_ATTRIBUTE_NORETURN
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+//
+// Tells the AddressSanitizer (or other memory testing tools) to ignore a given
+// function. Useful for cases when a function reads random locations on stack,
+// calls _exit from a cloned subprocess, deliberately accesses buffer
+// out of bounds or does other scary things with memory.
+// NOTE: GCC supports AddressSanitizer(asan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+    ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) && defined(_MSC_VER) && \
+    _MSC_VER >= 1928
+// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address)
+#elif defined(ABSL_HAVE_HWADDRESS_SANITIZER) && ABSL_HAVE_ATTRIBUTE(no_sanitize)
+// HWAddressSanitizer is a sanitizer similar to AddressSanitizer, which uses CPU
+// features to detect similar bugs with less CPU and memory overhead.
+// NOTE: GCC supports HWAddressSanitizer(hwasan) since 11.
+// https://gcc.gnu.org/gcc-11/changes.html
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS \
+  __attribute__((no_sanitize("hwaddress")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+//
+// Tells the MemorySanitizer to relax the handling of a given function. All "Use
+// of uninitialized value" warnings from such functions will be suppressed, and
+// all values loaded from memory will be considered fully initialized. This
+// attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute
+// above, but deals with initialized-ness rather than addressability issues.
+// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+//
+// Tells the ThreadSanitizer to not instrument a given function.
+// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+//
+// Tells the UndefinedSanitizer to ignore a given function. Useful for cases
+// where certain behavior (e.g. division by zero) is being used intentionally.
+// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9.
+// https://gcc.gnu.org/gcc-4.9/changes.html
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+  __attribute__((no_sanitize_undefined))
+#elif ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+  __attribute__((no_sanitize("undefined")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+//
+// Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
+// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__llvm__)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
+//
+// Tells the SafeStack to not instrument a given function.
+// See https://clang.llvm.org/docs/SafeStack.html for details.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \
+  __attribute__((no_sanitize("safe-stack")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
+#endif
+
+// ABSL_ATTRIBUTE_RETURNS_NONNULL
+//
+// Tells the compiler that a particular function never returns a null pointer.
+#if ABSL_HAVE_ATTRIBUTE(returns_nonnull)
+#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
+#else
+#define ABSL_ATTRIBUTE_RETURNS_NONNULL
+#endif
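A hedged sketch of how one of the sanitizer opt-outs above is typically applied; `UncheckedScale` is a hypothetical function, not part of this patch:

```cpp
#include "absl/base/attributes.h"

// Deliberately allows signed-integer overflow, so the function opts out of
// UBSan instrumentation. In uninstrumented builds the attribute expands to
// nothing and has no effect.
ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
int UncheckedScale(int value, int factor) {
  return value * factor;  // may overflow by design
}
```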
+
+// ABSL_HAVE_ATTRIBUTE_SECTION
+//
+// Indicates whether labeled sections are supported. Weak symbol support is
+// a prerequisite. Labeled sections are not supported on Darwin/iOS.
+#ifdef ABSL_HAVE_ATTRIBUTE_SECTION
+#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
+#elif (ABSL_HAVE_ATTRIBUTE(section) ||                \
+       (defined(__GNUC__) && !defined(__clang__))) && \
+    !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_SECTION 1
+
+// ABSL_ATTRIBUTE_SECTION
+//
+// Tells the compiler/linker to put a given function into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by GNU linker. Any function annotated with
+// `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into
+// whatever section its caller is placed into.
+//
+#ifndef ABSL_ATTRIBUTE_SECTION
+#define ABSL_ATTRIBUTE_SECTION(name) \
+  __attribute__((section(#name))) __attribute__((noinline))
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_VARIABLE
+//
+// Tells the compiler/linker to put a given variable into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by GNU linker.
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#ifdef _AIX
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// pseudo op, which includes an additional integer as part of its syntax
+// indicating alignment. If data falls under different alignments then you
+// might get a compilation error indicating a `Section type conflict`.
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#else
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
+#endif
+#endif
+
+// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+//
+// A weak section declaration to be used as a global declaration
+// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
+// even without functions with ABSL_ATTRIBUTE_SECTION(name).
+// ABSL_DEFINE_ATTRIBUTE_SECTION should be in exactly one file; it's
+// a no-op on ELF but not on Mach-O.
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)   \
+  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+  extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_START
+//
+// Returns `void*` pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
+// link.
+//
+#define ABSL_ATTRIBUTE_SECTION_START(name) \
+  (reinterpret_cast<void *>(__start_##name))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
+  (reinterpret_cast<void *>(__stop_##name))
+
+#else  // !ABSL_HAVE_ATTRIBUTE_SECTION
+
+#define ABSL_HAVE_ATTRIBUTE_SECTION 0
+
+// provide dummy definitions
+#define ABSL_ATTRIBUTE_SECTION(name)
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0))
+
+#endif  // ABSL_ATTRIBUTE_SECTION
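A minimal sketch of how the section macros above compose; the section name `mytrace` and both functions are hypothetical. On platforms without section support, the START/STOP macros yield null pointers:

```cpp
#include <cstdio>

#include "absl/base/attributes.h"

// Emits weak __start_mytrace/__stop_mytrace symbols so the bounds macros
// below link even if nothing was placed in the section.
ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(mytrace);

// Placed into section "mytrace"; the macro also applies noinline.
void TracedOperation() ABSL_ATTRIBUTE_SECTION(mytrace);
void TracedOperation() { std::puts("traced"); }

void DumpSectionBounds() {
  void* start = ABSL_ATTRIBUTE_SECTION_START(mytrace);
  void* stop = ABSL_ATTRIBUTE_SECTION_STOP(mytrace);
  // Both are null when labeled sections are unsupported (e.g. on Darwin).
  std::printf("mytrace section: [%p, %p)\n", start, stop);
}
```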
+
+// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+//
+// Support for aligning the stack on 32-bit x86.
+#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#if defined(__i386__)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
+  __attribute__((force_align_arg_pointer))
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#elif defined(__x86_64__)
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#else  // !__i386__ && !__x86_64
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#endif  // __i386__
+#else
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#endif
+
+// ABSL_MUST_USE_RESULT
+//
+// Tells the compiler to warn about unused results.
+//
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard `[[nodiscard]]` directly over this macro.
+//
+// When annotating a function, it must appear as the first part of the
+// declaration or definition. The compiler will warn if the return value from
+// such a function is unused:
+//
+//   ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket();
+//   AllocateSprocket();  // Triggers a warning.
+//
+// When annotating a class, it is equivalent to annotating every function which
+// returns an instance.
+//
+//   class ABSL_MUST_USE_RESULT Sprocket {};
+//   Sprocket();  // Triggers a warning.
+//
+//   Sprocket MakeSprocket();
+//   MakeSprocket();  // Triggers a warning.
+//
+// Note that references and pointers are not instances:
+//
+//   Sprocket* SprocketPointer();
+//   SprocketPointer();  // Does *not* trigger a warning.
+//
+// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result
+// warning. For that, warn_unused_result is used only for clang but not for gcc.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
+//
+// Note: past advice was to place the macro after the argument list.
+//
+// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is
+// compliant with the stricter [[nodiscard]].
+#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
+#else
+#define ABSL_MUST_USE_RESULT
+#endif
+
+// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD
+//
+// Tells GCC that a function is hot or cold. GCC can use this information to
+// improve static analysis, i.e. a conditional branch to a cold function
+// is likely to be not-taken.
+// This annotation is used for function declarations.
+//
+// Example:
+//
+//   int foo() ABSL_ATTRIBUTE_HOT;
+#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_HOT __attribute__((hot))
+#else
+#define ABSL_ATTRIBUTE_HOT
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_COLD __attribute__((cold))
+#else
+#define ABSL_ATTRIBUTE_COLD
+#endif
+
+// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS
+//
+// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT
+// macro used as an attribute to mark functions that must always or never be
+// instrumented by XRay. Currently, this is only supported in Clang/LLVM.
+//
+// For reference on the LLVM XRay instrumentation, see
+// http://llvm.org/docs/XRay.html.
+//
+// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration
+// will always get the XRay instrumentation sleds. These sleds may introduce
+// some binary size and runtime overhead and must be used sparingly.
+// +// These attributes only take effect when the following conditions are met: +// +// * The file/target is built in at least C++11 mode, with a Clang compiler +// that supports XRay attributes. +// * The file/target is built with the -fxray-instrument flag set for the +// Clang/LLVM compiler. +// * The function is defined in the translation unit (the compiler honors the +// attribute in either the definition or the declaration, and must match). +// +// There are cases when, even when building with XRay instrumentation, users +// might want to control specifically which functions are instrumented for a +// particular build using special-case lists provided to the compiler. These +// special case lists are provided to Clang via the +// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The +// attributes in source take precedence over these special-case lists. +// +// To disable the XRay attributes at build-time, users may define +// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific +// packages/targets, as this may lead to conflicting definitions of functions at +// link-time. +// +// XRay isn't currently supported on Android: +// https://github.com/android/ndk/issues/368 +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \ + !defined(ABSL_NO_XRAY_ATTRIBUTES) && !defined(__ANDROID__) +#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]] +#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) +#define ABSL_XRAY_LOG_ARGS(N) \ + [[clang::xray_always_instrument, clang::xray_log_args(N)]] +#else +#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] +#endif +#else +#define ABSL_XRAY_ALWAYS_INSTRUMENT +#define ABSL_XRAY_NEVER_INSTRUMENT +#define ABSL_XRAY_LOG_ARGS(N) +#endif + +// ABSL_ATTRIBUTE_REINITIALIZES +// +// Indicates that a member function reinitializes the entire object to a known +// state, independent of the previous state of the object. +// +// The clang-tidy check bugprone-use-after-move allows member functions marked +// with this attribute to be called on objects that have been moved from; +// without the attribute, this would result in a use-after-move warning. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) +#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] +#else +#define ABSL_ATTRIBUTE_REINITIALIZES +#endif + +// ----------------------------------------------------------------------------- +// Variable Attributes +// ----------------------------------------------------------------------------- + +// ABSL_ATTRIBUTE_UNUSED +// +// Prevents the compiler from complaining about variables that appear unused. +// +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard '[[maybe_unused]]' directly over this macro. +// +// Due to differences in positioning requirements between the old, compiler +// specific __attribute__ syntax and the now standard [[maybe_unused]], this +// macro does not attempt to take advantage of '[[maybe_unused]]'. +#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) +#undef ABSL_ATTRIBUTE_UNUSED +#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) +#else +#define ABSL_ATTRIBUTE_UNUSED +#endif + +// ABSL_ATTRIBUTE_INITIAL_EXEC +// +// Tells the compiler to use "initial-exec" mode for a thread-local variable. +// See http://people.redhat.com/drepper/tls.pdf for the gory details. 
+#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec"))) +#else +#define ABSL_ATTRIBUTE_INITIAL_EXEC +#endif + +// ABSL_ATTRIBUTE_PACKED +// +// Instructs the compiler not to use natural alignment for a tagged data +// structure, but instead to reduce its alignment to 1. +// +// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing +// so can cause atomic variables to be mis-aligned and silently violate +// atomicity on x86. +// +// This attribute can either be applied to members of a structure or to a +// structure in its entirety. Applying this attribute (judiciously) to a +// structure in its entirety to optimize the memory footprint of very +// commonly-used structs is fine. Do not apply this attribute to a structure in +// its entirety if the purpose is to control the offsets of the members in the +// structure. Instead, apply this attribute only to structure members that need +// it. +// +// When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the +// natural alignment of structure members not annotated is preserved. Aligned +// member accesses are faster than non-aligned member accesses even if the +// targeted microprocessor supports non-aligned accesses. +#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__)) +#else +#define ABSL_ATTRIBUTE_PACKED +#endif + +// ABSL_ATTRIBUTE_FUNC_ALIGN +// +// Tells the compiler to align the function start at least to certain +// alignment boundary +#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes))) +#else +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) +#endif + +// ABSL_FALLTHROUGH_INTENDED +// +// Annotates implicit fall-through between switch labels, allowing a case to +// indicate intentional fallthrough and turn off warnings about any lack of a +// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by +// a semicolon and can be used in most places where `break` can, provided that +// no statements exist between it and the next switch label. +// +// Example: +// +// switch (x) { +// case 40: +// case 41: +// if (truth_is_out_there) { +// ++x; +// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations +// // in comments +// } else { +// return x; +// } +// case 42: +// ... +// +// Notes: When supported, GCC and Clang can issue a warning on switch labels +// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See +// clang documentation on language extensions for details: +// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +// +// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has +// no effect on diagnostics. In any case this macro has no effect on runtime +// behavior and performance of code. + +#ifdef ABSL_FALLTHROUGH_INTENDED +#error "ABSL_FALLTHROUGH_INTENDED should not be defined." 
+#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#else
+#define ABSL_FALLTHROUGH_INTENDED \
+  do {                            \
+  } while (0)
+#endif
+
+// ABSL_DEPRECATED()
+//
+// Marks deprecated class, struct, enum, function, method, and variable
+// declarations. The macro argument is used as a custom diagnostic message (e.g.
+// suggestion of a better alternative).
+//
+// For code or headers that are assured to only build with C++14 and up, prefer
+// just using the standard `[[deprecated("message")]]` directly over this macro.
+//
+// Examples:
+//
+//   class ABSL_DEPRECATED("Use Bar instead") Foo {...};
+//
+//   ABSL_DEPRECATED("Use Baz() instead") void Bar() {...}
+//
+//   template <typename T>
+//   ABSL_DEPRECATED("Use DoThat() instead")
+//   void DoThis();
+//
+//   enum FooEnum {
+//     kBar ABSL_DEPRECATED("Use kBaz instead"),
+//   };
+//
+// Every usage of a deprecated entity will trigger a warning when compiled with
+// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain
+// turns this warning off by default, instead relying on clang-tidy to report
+// new uses of deprecated code.
+#if ABSL_HAVE_ATTRIBUTE(deprecated)
+#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
+#else
+#define ABSL_DEPRECATED(message)
+#endif
+
+// When deprecating Abseil code, it is sometimes necessary to turn off the
+// warning within Abseil, until the deprecated code is actually removed. The
+// deprecated code can be surrounded with these directives to achieve that
+// result.
+//
+//   class ABSL_DEPRECATED("Use Bar instead") Foo;
+//
+//   ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+//   Baz ComputeBazFromFoo(Foo f);
+//   ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#if defined(__GNUC__) || defined(__clang__)
+// Clang also supports these GCC pragmas.
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("GCC diagnostic push")                             \
+  _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("GCC diagnostic pop")
+#else
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#endif  // defined(__GNUC__) || defined(__clang__)
+
+// ABSL_CONST_INIT
+//
+// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will
+// not compile (on supported platforms) unless the variable has a constant
+// initializer. This is useful for variables with static and thread storage
+// duration, because it guarantees that they will not suffer from the so-called
+// "static init order fiasco".
+//
+// This attribute must be placed on the initializing declaration of the
+// variable. Some compilers will give a -Wmissing-constinit warning when this
+// attribute is placed on some other declaration but missing from the
+// initializing declaration.
+//
+// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can
+// also be used in a non-initializing declaration to tell the compiler that a
+// variable is already initialized, reducing overhead that would otherwise be
+// incurred by a hidden guard variable. Thus annotating all declarations with
+// this attribute is recommended to potentially enhance optimization.
+// +// Example: +// +// class MyClass { +// public: +// ABSL_CONST_INIT static MyType my_var; +// }; +// +// ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...); +// +// For code or headers that are assured to only build with C++20 and up, prefer +// just using the standard `constinit` keyword directly over this macro. +// +// Note that this attribute is redundant if the variable is declared constexpr. +#if defined(__cpp_constinit) && __cpp_constinit >= 201907L +#define ABSL_CONST_INIT constinit +#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#define ABSL_CONST_INIT [[clang::require_constant_initialization]] +#else +#define ABSL_CONST_INIT +#endif + +// ABSL_ATTRIBUTE_PURE_FUNCTION +// +// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure" +// functions. A function is pure if its return value is only a function of its +// arguments. The pure attribute prohibits a function from modifying the state +// of the program that is observable by means other than inspecting the +// function's return value. Declaring such functions with the pure attribute +// allows the compiler to avoid emitting some calls in repeated invocations of +// the function with the same argument values. +// +// Example: +// +// ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(Time t); +#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure) +#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]] +#elif ABSL_HAVE_ATTRIBUTE(pure) +#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure)) +#else +// If the attribute isn't defined, we'll fallback to ABSL_MUST_USE_RESULT since +// pure functions are useless if its return is ignored. +#define ABSL_ATTRIBUTE_PURE_FUNCTION ABSL_MUST_USE_RESULT +#endif + +// ABSL_ATTRIBUTE_CONST_FUNCTION +// +// ABSL_ATTRIBUTE_CONST_FUNCTION is used to annotate declarations of "const" +// functions. A const function is similar to a pure function, with one +// exception: Pure functions may return value that depend on a non-volatile +// object that isn't provided as a function argument, while the const function +// is guaranteed to return the same result given the same arguments. +// +// Example: +// +// ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Milliseconds(Duration d); +#if defined(_MSC_VER) && !defined(__clang__) +// Put the MSVC case first since MSVC seems to parse const as a C++ keyword. +#define ABSL_ATTRIBUTE_CONST_FUNCTION ABSL_ATTRIBUTE_PURE_FUNCTION +#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::const) +#define ABSL_ATTRIBUTE_CONST_FUNCTION [[gnu::const]] +#elif ABSL_HAVE_ATTRIBUTE(const) +#define ABSL_ATTRIBUTE_CONST_FUNCTION __attribute__((const)) +#else +// Since const functions are more restrictive pure function, we'll fallback to a +// pure function if the const attribute is not handled. +#define ABSL_ATTRIBUTE_CONST_FUNCTION ABSL_ATTRIBUTE_PURE_FUNCTION +#endif + +// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function +// parameter or implicit object parameter is retained by the return value of the +// annotated function (or, for a parameter of a constructor, in the value of the +// constructed object). This attribute causes warnings to be produced if a +// temporary object does not live long enough. +// +// When applied to a reference parameter, the referenced object is assumed to be +// retained by the return value of the function. When applied to a non-reference +// parameter (for example, a pointer or a class type), all temporaries +// referenced by the parameter are assumed to be retained by the return value of +// the function. 
+// +// See also the upstream documentation: +// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]] +#elif ABSL_HAVE_ATTRIBUTE(lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound)) +#else +#define ABSL_ATTRIBUTE_LIFETIME_BOUND +#endif + +// ABSL_ATTRIBUTE_TRIVIAL_ABI +// Indicates that a type is "trivially relocatable" -- meaning it can be +// relocated without invoking the constructor/destructor, using a form of move +// elision. +// +// From a memory safety point of view, putting aside destructor ordering, it's +// safe to apply ABSL_ATTRIBUTE_TRIVIAL_ABI if an object's location +// can change over the course of its lifetime: if a constructor can be run one +// place, and then the object magically teleports to another place where some +// methods are run, and then the object teleports to yet another place where it +// is destroyed. This is notably not true for self-referential types, where the +// move-constructor must keep the self-reference up to date. If the type changed +// location without invoking the move constructor, it would have a dangling +// self-reference. +// +// The use of this teleporting machinery means that the number of paired +// move/destroy operations can change, and so it is a bad idea to apply this to +// a type meant to count the number of moves. +// +// Warning: applying this can, rarely, break callers. Objects passed by value +// will be destroyed at the end of the call, instead of the end of the +// full-expression containing the call. In addition, it changes the ABI +// of functions accepting this type by value (e.g. to pass in registers). +// +// See also the upstream documentation: +// https://clang.llvm.org/docs/AttributeReference.html#trivial-abi +// +// b/321691395 - This is currently disabled in open-source builds since +// compiler support differs. If system libraries compiled with GCC are mixed +// with libraries compiled with Clang, types will have different ideas about +// their ABI, leading to hard to debug crashes. +#define ABSL_ATTRIBUTE_TRIVIAL_ABI + +// ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS +// +// Indicates a data member can be optimized to occupy no space (if it is empty) +// and/or its tail padding can be used for other members. +// +// For code that is assured to only build with C++20 or later, prefer using +// the standard attribute `[[no_unique_address]]` directly instead of this +// macro. +// +// https://devblogs.microsoft.com/cppblog/msvc-cpp20-and-the-std-cpp20-switch/#c20-no_unique_address +// Current versions of MSVC have disabled `[[no_unique_address]]` since it +// breaks ABI compatibility, but offers `[[msvc::no_unique_address]]` for +// situations when it can be assured that it is desired. Since Abseil does not +// claim ABI compatibility in mixed builds, we can offer it unconditionally. +#if defined(_MSC_VER) && _MSC_VER >= 1929 +#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]] +#elif ABSL_HAVE_CPP_ATTRIBUTE(no_unique_address) +#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[no_unique_address]] +#else +#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS +#endif + +#endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/third_party/absl/absl/base/bit_cast_test.cc b/third_party/absl/absl/base/bit_cast_test.cc new file mode 100644 index 000000000..8a3a41ea0 --- /dev/null +++ b/third_party/absl/absl/base/bit_cast_test.cc @@ -0,0 +1,109 @@ +// Copyright 2017 The Abseil Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Unit test for bit_cast template.
+
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+template <int N>
+struct marshall { char buf[N]; };
+
+template <typename T>
+void TestMarshall(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    marshall<sizeof(T)> m0 = absl::bit_cast<marshall<sizeof(T)> >(t0);
+    T t1 = absl::bit_cast<T>(m0);
+    marshall<sizeof(T)> m1 = absl::bit_cast<marshall<sizeof(T)> >(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T)));
+  }
+}
+
+// Convert back and forth to an integral type. The C++ standard does
+// not guarantee this will work, but we test that this works on all the
+// platforms we support.
+//
+// Likewise, we below make assumptions about sizeof(float) and
+// sizeof(double) which the standard does not guarantee, but which hold on the
+// platforms we support.
+
+template <typename T, typename I>
+void TestIntegral(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    I i0 = absl::bit_cast<I>(t0);
+    T t1 = absl::bit_cast<T>(i0);
+    I i1 = absl::bit_cast<I>(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(i0, i1);
+  }
+}
+
+TEST(BitCast, Bool) {
+  static const bool bool_list[] = { false, true };
+  TestMarshall<bool>(bool_list, ABSL_ARRAYSIZE(bool_list));
+}
+
+TEST(BitCast, Int32) {
+  static const int32_t int_list[] =
+    { 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 };
+  TestMarshall<int32_t>(int_list, ABSL_ARRAYSIZE(int_list));
+}
+
+TEST(BitCast, Int64) {
+  static const int64_t int64_list[] =
+    { 0, 1, 1LL << 40, -1, -(1LL<<40) };
+  TestMarshall<int64_t>(int64_list, ABSL_ARRAYSIZE(int64_list));
+}
+
+TEST(BitCast, Uint64) {
+  static const uint64_t uint64_list[] =
+    { 0, 1, 1LLU << 40, 1LLU << 63 };
+  TestMarshall<uint64_t>(uint64_list, ABSL_ARRAYSIZE(uint64_list));
+}
+
+TEST(BitCast, Float) {
+  static const float float_list[] =
+    { 0.0f, 1.0f, -1.0f, 10.0f, -10.0f,
+      1e10f, 1e20f, 1e-10f, 1e-20f,
+      2.71828f, 3.14159f };
+  TestMarshall<float>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, int>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, unsigned>(float_list, ABSL_ARRAYSIZE(float_list));
+}
+
+TEST(BitCast, Double) {
+  static const double double_list[] =
+    { 0.0, 1.0, -1.0, 10.0, -10.0,
+      1e10, 1e100, 1e-10, 1e-100,
+      2.718281828459045,
+      3.141592653589793238462643383279502884197169399375105820974944 };
+  TestMarshall<double>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, int64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, uint64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/call_once.h b/third_party/absl/absl/base/call_once.h
new file mode 100644
index 000000000..7b0e69ccd
--- /dev/null
+++ b/third_party/absl/absl/base/call_once.h
@@ -0,0 +1,225 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: call_once.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides an Abseil version of `std::call_once` for invoking
+// a given function at most once, across all threads. This Abseil version is
+// faster than the C++11 version and incorporates the C++17 argument-passing
+// fix, so that (for example) non-const references may be passed to the invoked
+// function.
+
+#ifndef ABSL_BASE_CALL_ONCE_H_
+#define ABSL_BASE_CALL_ONCE_H_
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/macros.h"
+#include "absl/base/nullability.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class once_flag;
+
+namespace base_internal {
+absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
+    absl::Nonnull<once_flag*> flag);
+}  // namespace base_internal
+
+// call_once()
+//
+// For all invocations using a given `once_flag`, invokes a given `fn` exactly
+// once across all threads. The first call to `call_once()` with a particular
+// `once_flag` argument (that does not throw an exception) will run the
+// specified function with the provided `args`; other calls with the same
+// `once_flag` argument will not run the function, but will wait
+// for the provided function to finish running (if it is still running).
+//
+// This mechanism provides a safe, simple, and fast mechanism for one-time
+// initialization in a multi-threaded process.
+//
+// Example:
+//
+//   class MyInitClass {
+//    public:
+//     ...
+//     mutable absl::once_flag once_;
+//
+//     MyInitClass* init() const {
+//       absl::call_once(once_, &MyInitClass::Init, this);
+//       return ptr_;
+//     }
+//
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);
+
+// once_flag
+//
+// Objects of this type are used to distinguish calls to `call_once()` and
+// ensure the provided function is only invoked once across all threads. This
+// type is not copyable or movable. However, it has a `constexpr`
+// constructor, and is safe to use as a namespace-scoped global variable.
+class once_flag {
+ public:
+  constexpr once_flag() : control_(0) {}
+  once_flag(const once_flag&) = delete;
+  once_flag& operator=(const once_flag&) = delete;
+
+ private:
+  friend absl::Nonnull<std::atomic<uint32_t>*> base_internal::ControlWord(
+      absl::Nonnull<once_flag*> flag);
+  std::atomic<uint32_t> control_;
+};
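Since the header's own example above is only a fragment, here is a complete, hedged usage sketch; the table and its functions are hypothetical, and only `absl::once_flag` and `absl::call_once` come from this header:

```cpp
#include "absl/base/call_once.h"

// One-time initialization of a process-wide resource.
absl::once_flag table_once;
int* g_table = nullptr;

void InitTable(int size) { g_table = new int[size](); }

int* GetTable() {
  // The first caller runs InitTable(1024); concurrent callers block until it
  // finishes; later callers pay only a cheap atomic load.
  absl::call_once(table_once, InitTable, 1024);
  return g_table;
}
```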
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+// Implementation details follow.
+//------------------------------------------------------------------------------
+
+namespace base_internal {
+
+// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
+// initialize entities used by the scheduler implementation.
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::Nonnull<once_flag*> flag, Callable&& fn,
+                      Args&&... args);
+
+// Disables scheduling while on stack when scheduling mode is non-cooperative.
+// No effect for cooperative scheduling modes.
+class SchedulingHelper {
+ public:
+  explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
+    }
+  }
+
+  ~SchedulingHelper() {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
+    }
+  }
+
+ private:
+  base_internal::SchedulingMode mode_;
+  bool guard_result_ = false;
+};
+
+// Bit patterns for call_once state machine values. Internal implementation
+// detail, not for use by clients.
+//
+// The bit patterns are arbitrarily chosen from unlikely values, to aid in
+// debugging. However, kOnceInit must be 0, so that a zero-initialized
+// once_flag will be valid for immediate use.
+enum {
+  kOnceInit = 0,
+  kOnceRunning = 0x65C2937B,
+  kOnceWaiter = 0x05A308D2,
+  // A very small constant is chosen for kOnceDone so that it fits in a single
+  // compare with immediate instruction for most common ISAs. This is verified
+  // for x86, POWER and ARM.
+  kOnceDone = 221,  // Random Number
+};
+
+template <typename Callable, typename... Args>
+ABSL_ATTRIBUTE_NOINLINE void CallOnceImpl(
+    absl::Nonnull<std::atomic<uint32_t>*> control,
+    base_internal::SchedulingMode scheduling_mode, Callable&& fn,
+    Args&&... args) {
+#ifndef NDEBUG
+  {
+    uint32_t old_control = control->load(std::memory_order_relaxed);
+    if (old_control != kOnceInit &&
+        old_control != kOnceRunning &&
+        old_control != kOnceWaiter &&
+        old_control != kOnceDone) {
+      ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
+                   static_cast<unsigned long>(old_control));  // NOLINT
+    }
+  }
+#endif  // NDEBUG
+  static const base_internal::SpinLockWaitTransition trans[] = {
+      {kOnceInit, kOnceRunning, true},
+      {kOnceRunning, kOnceWaiter, false},
+      {kOnceDone, kOnceDone, true}};
+
+  // Must do this before potentially modifying control word's state.
+  base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
+  // Short circuit the simplest case to avoid procedure call overhead.
+  // The base_internal::SpinLockWait() call returns either kOnceInit or
+  // kOnceDone. If it returns kOnceDone, it must have loaded the control word
+  // with std::memory_order_acquire and seen a value of kOnceDone.
+  uint32_t old_control = kOnceInit;
+  if (control->compare_exchange_strong(old_control, kOnceRunning,
+                                       std::memory_order_relaxed) ||
+      base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
+                                  scheduling_mode) == kOnceInit) {
+    base_internal::invoke(std::forward<Callable>(fn),
+                          std::forward<Args>(args)...);
+    old_control =
+        control->exchange(base_internal::kOnceDone, std::memory_order_release);
+    if (old_control == base_internal::kOnceWaiter) {
+      base_internal::SpinLockWake(control, true);
+    }
+  }  // else *control is already kOnceDone
+}
+
+inline absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
+    absl::Nonnull<once_flag*> flag) {
+  return &flag->control_;
+}
+
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::Nonnull<once_flag*> flag, Callable&& fn,
+                      Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
+                                std::forward<Callable>(fn),
+                                std::forward<Args>(args)...);
+  }
+}
+
+}  // namespace base_internal
+
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(
+        once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
+        std::forward<Callable>(fn), std::forward<Args>(args)...);
+  }
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CALL_ONCE_H_
diff --git a/third_party/absl/absl/base/call_once_test.cc b/third_party/absl/absl/base/call_once_test.cc
new file mode 100644
index 000000000..11d26c44d
--- /dev/null
+++ b/third_party/absl/absl/base/call_once_test.cc
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/call_once.h"
+
+#include <thread>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+absl::once_flag once;
+
+ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit);
+
+int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0;
+bool done_blocking ABSL_GUARDED_BY(counters_mu) = false;
+
+// Function to be called from absl::call_once. Waits for a notification.
+void WaitAndIncrement() {
+  counters_mu.Lock();
+  ++call_once_invoke_count;
+  counters_mu.Unlock();
+
+  counters_mu.LockWhen(Condition(&done_blocking));
+  ++call_once_finished_count;
+  counters_mu.Unlock();
+}
+
+void ThreadBody() {
+  counters_mu.Lock();
+  ++running_thread_count;
+  counters_mu.Unlock();
+
+  absl::call_once(once, WaitAndIncrement);
+
+  counters_mu.Lock();
+  ++call_once_return_count;
+  counters_mu.Unlock();
+}
+
+// Returns true if all threads are set up for the test.
+bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
+  // All ten threads must be running, and WaitAndIncrement should be blocked.
+  return running_thread_count == 10 && call_once_invoke_count == 1;
+}
+
+TEST(CallOnceTest, ExecutionCount) {
+  std::vector<std::thread> threads;
+
+  // Start 10 threads all calling call_once on the same once_flag.
+  for (int i = 0; i < 10; ++i) {
+    threads.emplace_back(ThreadBody);
+  }
+
+  // Wait until all ten threads have started, and WaitAndIncrement has been
+  // invoked.
+  counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr));
+
+  // WaitAndIncrement should have been invoked by exactly one call_once()
+  // instance. That thread should be blocking on a notification, and all other
+  // call_once instances should be blocking as well.
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 0);
+  EXPECT_EQ(call_once_return_count, 0);
+
+  // Allow WaitAndIncrement to finish executing. Once it does, the other
+  // call_once waiters will be unblocked.
+  done_blocking = true;
+  counters_mu.Unlock();
+
+  for (std::thread& thread : threads) {
+    thread.join();
+  }
+
+  counters_mu.Lock();
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 1);
+  EXPECT_EQ(call_once_return_count, 10);
+  counters_mu.Unlock();
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/casts.h b/third_party/absl/absl/base/casts.h
new file mode 100644
index 000000000..e0b11bbe4
--- /dev/null
+++ b/third_party/absl/absl/base/casts.h
@@ -0,0 +1,180 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit>  // For std::bit_cast.
+#endif  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+#include "absl/base/internal/identity.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+//   // If the context allows implicit conversion:
+//   From from;
+//   To to = from;
+//
+//   // Such code can be replaced by:
+//   implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+//   return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+//   // Assume an object C is convertible to B, which is implicitly convertible
+//   // to A
+//   A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+constexpr To implicit_cast(typename absl::internal::type_identity_t<To> to) {
+  return to;
+}
+
+// bit_cast()
+//
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
+//
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int>(f);
+//   // i = 0x40490fdb
+//
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
+//
+// Example:
+//
+//   // WRONG
+//   float f = 3.14159265358979;
+//   int i = reinterpret_cast<int&>(f);    // Wrong
+//   int j = *reinterpret_cast<int*>(&f);  // Equally wrong
+//   int k = *bit_cast<int*>(&f);          // Equally wrong
+//
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most values of "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
+//
+// The requirements of `absl::bit_cast` are more strict than that of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `absl::bit_cast` is replaced by
+// `std::bit_cast`.
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+template <
+    typename Dest, typename Source,
+    typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
+                                std::is_trivially_copyable<Source>::value &&
+                                std::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                                && std::is_default_constructible<Dest>::value
+#endif  // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                            ,
+                            int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source& source) {
+  return __builtin_bit_cast(Dest, source);
+}
+#else  // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline Dest bit_cast(const Source& source) {
+  Dest dest;
+  memcpy(static_cast<void*>(std::addressof(dest)),
+         static_cast<const void*>(std::addressof(source)), sizeof(dest));
+  return dest;
+}
+#endif  // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+
+#endif  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CASTS_H_
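Before the patch moves on to config.h, a brief usage sketch for the two casts defined above; the variable names are illustrative only, and the bit pattern shown is an ordinary IEEE-754 fact:

```cpp
#include <cstdint>
#include <cstdio>

#include "absl/base/casts.h"

int main() {
  // bit_cast: inspect a float's object representation without UB.
  const uint32_t bits = absl::bit_cast<uint32_t>(1.0f);
  std::printf("%#x\n", static_cast<unsigned>(bits));  // 0x3f800000

  // implicit_cast: make an allowed implicit conversion explicit and greppable.
  const long n = 42;
  const int m = absl::implicit_cast<int>(n);
  return m - 42;
}
```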
diff --git a/third_party/absl/absl/base/config.h b/third_party/absl/absl/base/config.h
new file mode 100644
index 000000000..3933a3dbe
--- /dev/null
+++ b/third_party/absl/absl/base/config.h
@@ -0,0 +1,1004 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: config.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of macros for checking the presence of
+// important compiler and platform features. Such macros can be used to
+// produce portable code by parameterizing compilation based on the presence or
+// lack of a given feature.
+//
+// We define a "feature" as some interface we wish to program to: for example,
+// a library function or system call. A value of `1` indicates support for
+// that feature; any other value indicates the feature support is undefined.
+//
+// Example:
+//
+// Suppose a programmer wants to write a program that uses the 'mmap()' system
+// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
+// selectively include the `mmap.h` header and bracket code using that feature
+// in the macro:
+//
+//   #include "absl/base/config.h"
+//
+//   #ifdef ABSL_HAVE_MMAP
+//   #include "sys/mman.h"
+//   #endif  //ABSL_HAVE_MMAP
+//
+//   ...
+//   #ifdef ABSL_HAVE_MMAP
+//   void *ptr = mmap(...);
+//   ...
+//   #endif  // ABSL_HAVE_MMAP
+
+#ifndef ABSL_BASE_CONFIG_H_
+#define ABSL_BASE_CONFIG_H_
+
+// Included for the __GLIBC__ macro (or similar macros on other systems).
+#include <limits.h>
+
+#ifdef __cplusplus
+// Included for __GLIBCXX__, _LIBCPP_VERSION
+#include <cstddef>
+#endif  // __cplusplus
+
+// ABSL_INTERNAL_CPLUSPLUS_LANG
+//
+// MSVC does not set the value of __cplusplus correctly, but instead uses
+// _MSVC_LANG as a stand-in.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+//
+// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at
+// times, for example:
+// https://github.com/microsoft/vscode-cpptools/issues/1770
+// https://reviews.llvm.org/D70996
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(_MSVC_LANG)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG
+#elif defined(__cplusplus)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus
+#endif
+
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+// Include library feature test macros.
+#include <version>
+#endif
+
+#if defined(__APPLE__)
+// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
+// __IPHONE_8_0.
+#include <Availability.h>
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/options.h"
+#include "absl/base/policy_checks.h"
+
+// Abseil long-term support (LTS) releases will define
+// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the
+// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the
+// integer representing the patch-level for that release.
+//
+// For example, for LTS release version "20300401.2", this would give us
+// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2
+//
+// These symbols will not be defined in non-LTS code.
+//
+// Abseil recommends that clients live-at-head. Therefore, if you are using
+// these symbols to assert a minimum version requirement, we recommend you do it
+// as
+//
+// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401
+// #error Project foo requires Abseil LTS version >= 20300401
+// #endif
+//
+// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes
+// live-at-head clients from the minimum version assertion.
+//
+// See https://abseil.io/about/releases for more information on Abseil release
+// management.
+//
+// LTS releases can be obtained from
+// https://github.com/abseil/abseil-cpp/releases.
+#define ABSL_LTS_RELEASE_VERSION 20240116
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 2
+
+// Helper macro to convert a CPP variable to a string literal.
+#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
+#define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x)
+
+// -----------------------------------------------------------------------------
+// Abseil namespace annotations
+// -----------------------------------------------------------------------------
+
+// ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END
+//
+// An annotation placed at the beginning/end of each `namespace absl` scope.
+// This is used to inject an inline namespace.
+//
+// The proper way to write Abseil code in the `absl` namespace is:
+//
+// namespace absl {
+// ABSL_NAMESPACE_BEGIN
+//
+// void Foo();  // absl::Foo().
+//
+// ABSL_NAMESPACE_END
+// }  // namespace absl
+//
+// Users of Abseil should not use these macros, because users of Abseil should
+// not write `namespace absl {` in their own code for any reason. (Abseil does
+// not support forward declarations of its own types, nor does it support
+// user-provided specialization of Abseil templates. Code that violates these
+// rules may be broken without warning.)
+#if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \
+    !defined(ABSL_OPTION_INLINE_NAMESPACE_NAME)
+#error options.h is misconfigured.
+#endif + +// Check that ABSL_OPTION_INLINE_NAMESPACE_NAME is neither "head" nor "" +#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1 + +#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) + +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', + "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "not be empty."); +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', + "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "be changed to a new, unique identifier name."); + +#endif + +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_NAMESPACE_BEGIN +#define ABSL_NAMESPACE_END +#define ABSL_INTERNAL_C_SYMBOL(x) x +#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 +#define ABSL_NAMESPACE_BEGIN \ + inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME { +#define ABSL_NAMESPACE_END } +#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v +#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) +#define ABSL_INTERNAL_C_SYMBOL(x) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) +#else +#error options.h is misconfigured. +#endif + +// ----------------------------------------------------------------------------- +// Compiler Feature Checks +// ----------------------------------------------------------------------------- + +// ABSL_HAVE_BUILTIN() +// +// Checks whether the compiler supports a Clang Feature Checking Macro, and if +// so, checks whether it supports the provided builtin function "x" where x +// is one of the functions noted in +// https://clang.llvm.org/docs/LanguageExtensions.html +// +// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. +// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html +#ifdef __has_builtin +#define ABSL_HAVE_BUILTIN(x) __has_builtin(x) +#else +#define ABSL_HAVE_BUILTIN(x) 0 +#endif + +#ifdef __has_feature +#define ABSL_HAVE_FEATURE(f) __has_feature(f) +#else +#define ABSL_HAVE_FEATURE(f) 0 +#endif + +// Portable check for GCC minimum version: +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ + (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 +#endif + +#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ + (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 +#endif + +// ABSL_HAVE_TLS is defined to 1 when __thread should be supported. +// We assume __thread is supported on Linux or Asylo when compiled with Clang or +// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined. +#ifdef ABSL_HAVE_TLS +#error ABSL_HAVE_TLS cannot be directly set +#elif (defined(__linux__) || defined(__ASYLO__)) && \ + (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#define ABSL_HAVE_TLS 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +// +// Checks whether `std::is_trivially_destructible` is supported. 
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+//
+// Checks whether `std::is_trivially_default_constructible` and
+// `std::is_trivially_copy_constructible` are supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+//
+// Checks whether `std::is_trivially_copy_assignable` is supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
+//
+// Checks whether `std::is_trivially_copyable` is supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
+#endif
+
+// ABSL_HAVE_THREAD_LOCAL
+//
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+#ifdef ABSL_HAVE_THREAD_LOCAL
+#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
+#elif defined(__APPLE__)
+// Notes:
+// * Xcode's clang did not support `thread_local` until version 8, and
+//   even then not for all iOS < 9.0.
+// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
+//   targeting iOS 9.x.
+// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
+//   making ABSL_HAVE_FEATURE unreliable there.
+//
+#if ABSL_HAVE_FEATURE(cxx_thread_local) && \
+    !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+#else  // !defined(__APPLE__)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+
+// There are platforms for which TLS should not be used even though the
+// compiler makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration:
+// Abseil does not intend to support this indefinitely. Currently, the newest
+// toolchain that we intend to support that requires this behavior is the
+// r11 NDK - allowing for a 5 year support window on that means this option
+// is likely to be removed around June of 2021.
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if defined(__ANDROID__) && defined(__clang__)
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif  // __has_include(<android/ndk-version.h>)
+#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
+    defined(__NDK_MINOR__) &&                                               \
+    ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef ABSL_HAVE_TLS
+#undef ABSL_HAVE_THREAD_LOCAL
+#endif
+#endif  // defined(__ANDROID__) && defined(__clang__)
+
+// ABSL_HAVE_INTRINSIC_INT128
+//
+// Checks whether the __int128 compiler extension for a 128-bit integral type
+// is supported.
+// +// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is +// supported, but we avoid using it in certain cases: +// * On Clang: +// * Building using Clang for Windows, where the Clang runtime library has +// 128-bit support only on LP64 architectures, but Windows is LLP64. +// * On Nvidia's nvcc: +// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions +// actually support __int128. +#ifdef ABSL_HAVE_INTRINSIC_INT128 +#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set +#elif defined(__SIZEOF_INT128__) +#if (defined(__clang__) && !defined(_WIN32)) || \ + (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ + (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) +#define ABSL_HAVE_INTRINSIC_INT128 1 +#elif defined(__CUDACC__) +// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a +// string explaining that it has been removed starting with CUDA 9. We use +// nested #ifs because there is no short-circuiting in the preprocessor. +// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined. +#if __CUDACC_VER__ >= 70000 +#define ABSL_HAVE_INTRINSIC_INT128 1 +#endif // __CUDACC_VER__ >= 70000 +#endif // defined(__CUDACC__) +#endif // ABSL_HAVE_INTRINSIC_INT128 + +// ABSL_HAVE_EXCEPTIONS +// +// Checks whether the compiler both supports and enables exceptions. Many +// compilers support a "no exceptions" mode that disables exceptions. +// +// Generally, when ABSL_HAVE_EXCEPTIONS is not defined: +// +// * Code using `throw` and `try` may not compile. +// * The `noexcept` specifier will still compile and behave as normal. +// * The `noexcept` operator may still return `false`. +// +// For further details, consult the compiler's documentation. +#ifdef ABSL_HAVE_EXCEPTIONS +#error ABSL_HAVE_EXCEPTIONS cannot be directly set. +#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6) +// Clang >= 3.6 +#if ABSL_HAVE_FEATURE(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // ABSL_HAVE_FEATURE(cxx_exceptions) +#elif defined(__clang__) +// Clang < 3.6 +// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro +#if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) +// Handle remaining special cases and default to exceptions being supported. +#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \ + !defined(__cpp_exceptions)) && \ + !(defined(_MSC_VER) && !defined(_CPPUNWIND)) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif + +// ----------------------------------------------------------------------------- +// Platform Feature Checks +// ----------------------------------------------------------------------------- + +// Currently supported operating systems and associated preprocessor +// symbols: +// +// Linux and Linux-derived __linux__ +// Android __ANDROID__ (implies __linux__) +// Linux (non-Android) __linux__ && !__ANDROID__ +// Darwin (macOS and iOS) __APPLE__ +// Akaros (http://akaros.org) __ros__ +// Windows _WIN32 +// NaCL __native_client__ +// AsmJS __asmjs__ +// WebAssembly (Emscripten) __EMSCRIPTEN__ +// Fuchsia __Fuchsia__ +// +// Note that since Android defines both __ANDROID__ and __linux__, one +// may probe for either Linux or Android by simply testing for __linux__. + +// ABSL_HAVE_MMAP +// +// Checks whether the platform has an mmap(2) implementation as defined in +// POSIX.1-2001. 
+#ifdef ABSL_HAVE_MMAP
+#error ABSL_HAVE_MMAP cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||     \
+    defined(_AIX) || defined(__ros__) || defined(__native_client__) ||        \
+    defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) ||  \
+    defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) ||           \
+    defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) ||      \
+    defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__)
+#define ABSL_HAVE_MMAP 1
+#endif
+
+// ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+//
+// Checks whether the platform implements the pthread_(get|set)schedparam(3)
+// functions as defined in POSIX.1-2001.
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) ||          \
+    defined(__NetBSD__) || defined(__VXWORKS__)
+#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
+#endif
+
+// ABSL_HAVE_SCHED_GETCPU
+//
+// Checks whether sched_getcpu is available.
+#ifdef ABSL_HAVE_SCHED_GETCPU
+#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
+#elif defined(__linux__)
+#define ABSL_HAVE_SCHED_GETCPU 1
+#endif
+
+// ABSL_HAVE_SCHED_YIELD
+//
+// Checks whether the platform implements sched_yield(2) as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_SCHED_YIELD
+#error ABSL_HAVE_SCHED_YIELD cannot be directly set
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) || \
+    defined(__VXWORKS__)
+#define ABSL_HAVE_SCHED_YIELD 1
+#endif
+
+// ABSL_HAVE_SEMAPHORE_H
+//
+// Checks whether the platform supports the <semaphore.h> header and
+// sem_init(3) family of functions as standardized in POSIX.1-2001.
+//
+// Note: While Apple provides <semaphore.h> for both iOS and macOS, it is
+// explicitly deprecated and will cause build failures if enabled for those
+// platforms. We side-step the issue by not defining it here for Apple
+// platforms.
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
+#elif defined(__linux__) || defined(__ros__) || defined(__VXWORKS__)
+#define ABSL_HAVE_SEMAPHORE_H 1
+#endif
+
+// ABSL_HAVE_ALARM
+//
+// Checks whether the platform supports the <unistd.h> header and alarm(2)
+// function as standardized in POSIX.1-2001.
+#ifdef ABSL_HAVE_ALARM
+#error ABSL_HAVE_ALARM cannot be directly set
+#elif defined(__GOOGLE_GRTE_VERSION__)
+// feature tests for Google's GRTE
+#define ABSL_HAVE_ALARM 1
+#elif defined(__GLIBC__)
+// feature test for glibc
+#define ABSL_HAVE_ALARM 1
+#elif defined(_MSC_VER)
+// feature tests for Microsoft's library
+#elif defined(__MINGW32__)
+// mingw32 doesn't provide alarm(2):
+// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h
+// mingw-w64 provides a no-op implementation:
+// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c
+#elif defined(__EMSCRIPTEN__)
+// emscripten doesn't support signals
+#elif defined(__wasi__)
+// WASI doesn't support signals
+#elif defined(__Fuchsia__)
+// Signals don't exist on fuchsia.
+#elif defined(__native_client__)
+// Signals don't exist on hexagon/QuRT
+#elif defined(__hexagon__)
+#else
+// other standard libraries
+#define ABSL_HAVE_ALARM 1
+#endif
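As a quick consumer-side sketch of the POSIX feature macros above: the guard macro and `sem_init` call are real, but the helper function and fallback policy are illustrative only.

```cpp
#include "absl/base/config.h"

#ifdef ABSL_HAVE_SEMAPHORE_H
#include <semaphore.h>
#endif

// Initializes a binary semaphore where <semaphore.h> exists. Callers on
// other platforms (e.g. Apple, where the header is deprecated) take the
// else branch and would use a different primitive instead.
bool InitBinarySemaphore(void* storage) {
#ifdef ABSL_HAVE_SEMAPHORE_H
  return sem_init(static_cast<sem_t*>(storage), /*pshared=*/0,
                  /*value=*/1) == 0;
#else
  (void)storage;
  return false;
#endif
}
```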
+// ABSL_IS_LITTLE_ENDIAN
+// ABSL_IS_BIG_ENDIAN
+//
+// Checks the endianness of the platform.
+//
+// Notes: uses the built in endian macros provided by GCC (since 4.6) and
+// Clang (since 3.2); see
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
+// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error.
+#if defined(ABSL_IS_BIG_ENDIAN)
+#error "ABSL_IS_BIG_ENDIAN cannot be directly set."
+#endif
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
+#endif
+
+#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+     __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define ABSL_IS_BIG_ENDIAN 1
+#elif defined(_WIN32)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#else
+#error "absl endian detection needs to be set up for your compiler"
+#endif
+
+// macOS < 10.13 and iOS < 12 don't support <any>, <optional>, or <variant>
+// because the libc++ shared library shipped on the system doesn't have the
+// requisite exported symbols. See
+// https://github.com/abseil/abseil-cpp/issues/207 and
+// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+//
+// libc++ spells out the availability requirements in the file
+// llvm-project/libcxx/include/__config via the #define
+// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. The set of versions has been
+// modified a few times, via
+// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953
+// and
+// https://github.com/llvm/llvm-project/commit/0bc451e7e137c4ccadcd3377250874f641ca514a
+// The second has the actually correct versions, thus, is what we copy here.
+#if defined(__APPLE__) &&                                         \
+    ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) &&   \
+      __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) ||  \
+     (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) &&  \
+      __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+     (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) &&   \
+      __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) ||   \
+     (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) &&      \
+      __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
+#else
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
+#endif
+
+// ABSL_HAVE_STD_ANY
+//
+// Checks whether C++17 std::any is available.
+#ifdef ABSL_HAVE_STD_ANY
+#error "ABSL_HAVE_STD_ANY cannot be directly set."
+#elif defined(__cpp_lib_any) && __cpp_lib_any >= 201606L
+#define ABSL_HAVE_STD_ANY 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_ANY 1
+#endif
+
+// ABSL_HAVE_STD_OPTIONAL
+//
+// Checks whether C++17 std::optional is available.
+#ifdef ABSL_HAVE_STD_OPTIONAL
+#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
+#elif defined(__cpp_lib_optional) && __cpp_lib_optional >= 202106L
+#define ABSL_HAVE_STD_OPTIONAL 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_OPTIONAL 1
+#endif
+
+// ABSL_HAVE_STD_VARIANT
+//
+// Checks whether C++17 std::variant is available.
+#ifdef ABSL_HAVE_STD_VARIANT
+#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
+#elif defined(__cpp_lib_variant) && __cpp_lib_variant >= 201606L +#define ABSL_HAVE_STD_VARIANT 1 +#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \ + !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define ABSL_HAVE_STD_VARIANT 1 +#endif + +// ABSL_HAVE_STD_STRING_VIEW +// +// Checks whether C++17 std::string_view is available. +#ifdef ABSL_HAVE_STD_STRING_VIEW +#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set." +#elif defined(__cpp_lib_string_view) && __cpp_lib_string_view >= 201606L +#define ABSL_HAVE_STD_STRING_VIEW 1 +#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +#define ABSL_HAVE_STD_STRING_VIEW 1 +#endif + +// ABSL_HAVE_STD_ORDERING +// +// Checks whether C++20 std::{partial,weak,strong}_ordering are available. +// +// __cpp_lib_three_way_comparison is missing on libc++ +// (https://github.com/llvm/llvm-project/issues/73953) so treat it as defined +// when building in C++20 mode. +#ifdef ABSL_HAVE_STD_ORDERING +#error "ABSL_HAVE_STD_ORDERING cannot be directly set." +#elif (defined(__cpp_lib_three_way_comparison) && \ + __cpp_lib_three_way_comparison >= 201907L) || \ + (defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L) +#define ABSL_HAVE_STD_ORDERING 1 +#endif + +// ABSL_USES_STD_ANY +// +// Indicates whether absl::any is an alias for std::any. +#if !defined(ABSL_OPTION_USE_STD_ANY) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_ANY == 0 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY)) +#undef ABSL_USES_STD_ANY +#elif ABSL_OPTION_USE_STD_ANY == 1 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY)) +#define ABSL_USES_STD_ANY 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_OPTIONAL +// +// Indicates whether absl::optional is an alias for std::optional. +#if !defined(ABSL_OPTION_USE_STD_OPTIONAL) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL)) +#undef ABSL_USES_STD_OPTIONAL +#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL)) +#define ABSL_USES_STD_OPTIONAL 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_VARIANT +// +// Indicates whether absl::variant is an alias for std::variant. +#if !defined(ABSL_OPTION_USE_STD_VARIANT) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT)) +#undef ABSL_USES_STD_VARIANT +#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT)) +#define ABSL_USES_STD_VARIANT 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_STRING_VIEW +// +// Indicates whether absl::string_view is an alias for std::string_view. +#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + !defined(ABSL_HAVE_STD_STRING_VIEW)) +#undef ABSL_USES_STD_STRING_VIEW +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + defined(ABSL_HAVE_STD_STRING_VIEW)) +#define ABSL_USES_STD_STRING_VIEW 1 +#else +#error options.h is misconfigured. 
+#endif + +// ABSL_USES_STD_ORDERING +// +// Indicates whether absl::{partial,weak,strong}_ordering are aliases for the +// std:: ordering types. +#if !defined(ABSL_OPTION_USE_STD_ORDERING) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_ORDERING == 0 || \ + (ABSL_OPTION_USE_STD_ORDERING == 2 && !defined(ABSL_HAVE_STD_ORDERING)) +#undef ABSL_USES_STD_ORDERING +#elif ABSL_OPTION_USE_STD_ORDERING == 1 || \ + (ABSL_OPTION_USE_STD_ORDERING == 2 && defined(ABSL_HAVE_STD_ORDERING)) +#define ABSL_USES_STD_ORDERING 1 +#else +#error options.h is misconfigured. +#endif + +// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION +// SEH exception from emplace for variant when constructing the +// struct can throw. This defeats some of variant_test and +// variant_exception_safety_test. +#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) +#define ABSL_INTERNAL_MSVC_2017_DBG_MODE +#endif + +// ABSL_INTERNAL_MANGLED_NS +// ABSL_INTERNAL_MANGLED_BACKREFERENCE +// +// Internal macros for building up mangled names in our internal fork of CCTZ. +// This implementation detail is only needed and provided for the MSVC build. +// +// These macros both expand to string literals. ABSL_INTERNAL_MANGLED_NS is +// the mangled spelling of the `absl` namespace, and +// ABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing +// the proper count to skip past the CCTZ fork namespace names. (This number +// is one larger when there is an inline namespace name to skip.) +#if defined(_MSC_VER) +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_INTERNAL_MANGLED_NS "absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5" +#else +#define ABSL_INTERNAL_MANGLED_NS \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6" +#endif +#endif + +// ABSL_DLL +// +// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` +// so we can annotate symbols appropriately as being exported. When used in +// headers consuming a DLL, this macro expands to `__declspec(dllimport)` so +// that consumers know the symbol is defined inside the DLL. In all other cases, +// the macro expands to nothing. +#if defined(_MSC_VER) +#if defined(ABSL_BUILD_DLL) +#define ABSL_DLL __declspec(dllexport) +#elif defined(ABSL_CONSUME_DLL) +#define ABSL_DLL __declspec(dllimport) +#else +#define ABSL_DLL +#endif +#else +#define ABSL_DLL +#endif // defined(_MSC_VER) + +#if defined(_MSC_VER) +#if defined(ABSL_BUILD_TEST_DLL) +#define ABSL_TEST_DLL __declspec(dllexport) +#elif defined(ABSL_CONSUME_TEST_DLL) +#define ABSL_TEST_DLL __declspec(dllimport) +#else +#define ABSL_TEST_DLL +#endif +#else +#define ABSL_TEST_DLL +#endif // defined(_MSC_VER) + +// ABSL_HAVE_MEMORY_SANITIZER +// +// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of +// a compiler instrumentation module and a run-time library. +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set." +#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer) +#define ABSL_HAVE_MEMORY_SANITIZER 1 +#endif + +// ABSL_HAVE_THREAD_SANITIZER +// +// ThreadSanitizer (TSan) is a fast data race detector. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set." 
+#elif defined(__SANITIZE_THREAD__) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(thread_sanitizer) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#endif + +// ABSL_HAVE_ADDRESS_SANITIZER +// +// AddressSanitizer (ASan) is a fast memory error detector. +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_ADDRESS__) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(address_sanitizer) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_HWADDRESS_SANITIZER +// +// Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan +// memory error detector which can use CPU features like ARM TBI, Intel LAM or +// AMD UAI. +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER +#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_HWADDRESS__) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_DATAFLOW_SANITIZER +// +// Dataflow Sanitizer (or DFSAN) is a generalised dynamic data flow analysis. +#ifdef ABSL_HAVE_DATAFLOW_SANITIZER +#error "ABSL_HAVE_DATAFLOW_SANITIZER cannot be directly set." +#elif defined(DATAFLOW_SANITIZER) +// GCC provides no method for detecting the presence of the standalone +// DataFlowSanitizer (-fsanitize=dataflow), so GCC users of -fsanitize=dataflow +// should also use -DDATAFLOW_SANITIZER. +#define ABSL_HAVE_DATAFLOW_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(dataflow_sanitizer) +#define ABSL_HAVE_DATAFLOW_SANITIZER 1 +#endif + +// ABSL_HAVE_LEAK_SANITIZER +// +// LeakSanitizer (or lsan) is a detector of memory leaks. +// https://clang.llvm.org/docs/LeakSanitizer.html +// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// +// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time +// whether the LeakSanitizer is potentially available. However, just because the +// LeakSanitizer is available does not mean it is active. Use the +// always-available run-time interface in //absl/debugging/leak_check.h for +// interacting with LeakSanitizer. +#ifdef ABSL_HAVE_LEAK_SANITIZER +#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set." +#elif defined(LEAK_SANITIZER) +// GCC provides no method for detecting the presence of the standalone +// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also +// use -DLEAK_SANITIZER. +#define ABSL_HAVE_LEAK_SANITIZER 1 +// Clang standalone LeakSanitizer (-fsanitize=leak) +#elif ABSL_HAVE_FEATURE(leak_sanitizer) +#define ABSL_HAVE_LEAK_SANITIZER 1 +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) +// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer. +#define ABSL_HAVE_LEAK_SANITIZER 1 +#endif + +// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// +// Class template argument deduction is a language feature added in C++17. +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set." 
+#elif defined(__cpp_deduction_guides) +#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1 +#endif + +// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +// +// Prior to C++17, static constexpr variables defined in classes required a +// separate definition outside of the class body, for example: +// +// class Foo { +// static constexpr int kBar = 0; +// }; +// constexpr int Foo::kBar; +// +// In C++17, these variables defined in classes are considered inline variables, +// and the extra declaration is redundant. Since some compilers warn on the +// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used +// conditionally ignore them: +// +// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +// constexpr int Foo::kBar; +// #endif +#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ + ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L +#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1 +#endif + +// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with +// RTTI support. +#ifdef ABSL_INTERNAL_HAS_RTTI +#error ABSL_INTERNAL_HAS_RTTI cannot be directly set +#elif ABSL_HAVE_FEATURE(cxx_rtti) +#define ABSL_INTERNAL_HAS_RTTI 1 +#elif defined(__GNUC__) && defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_RTTI 1 +#elif defined(_MSC_VER) && defined(_CPPRTTI) +#define ABSL_INTERNAL_HAS_RTTI 1 +#elif !defined(__GNUC__) && !defined(_MSC_VER) +// Unknown compiler, default to RTTI +#define ABSL_INTERNAL_HAS_RTTI 1 +#endif + +// `ABSL_INTERNAL_HAS_CXA_DEMANGLE` determines whether `abi::__cxa_demangle` is +// available. +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set +#elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__)) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0 +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && \ + (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \ + !defined(__mips__) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1 +#elif defined(__clang__) && !defined(_MSC_VER) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1 +#endif + +// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support. +// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. +#ifdef ABSL_INTERNAL_HAVE_SSE +#error ABSL_INTERNAL_HAVE_SSE cannot be directly set +#elif defined(__SSE__) +#define ABSL_INTERNAL_HAVE_SSE 1 +#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \ + !defined(_M_ARM64EC) +// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1 +// indicates that at least SSE was targeted with the /arch:SSE option. +// All x86-64 processors support SSE, so support can be assumed. +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +#define ABSL_INTERNAL_HAVE_SSE 1 +#endif + +// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support. +// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of +// which architectures support the various x86 instruction sets. +#ifdef ABSL_INTERNAL_HAVE_SSE2 +#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set +#elif defined(__SSE2__) +#define ABSL_INTERNAL_HAVE_SSE2 1 +#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \ + !defined(_M_ARM64EC) +// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2 +// indicates that at least SSE2 was targeted with the /arch:SSE2 option. +// All x86-64 processors support SSE2, so support can be assumed. 
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3
+// support. See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an
+// overview of which architectures support the various x86 instruction sets.
+//
+// MSVC does not have a mode that targets SSSE3 at compile-time. Using SSSE3
+// with MSVC therefore requires either assuming that the code will only ever
+// run on CPUs that support SSSE3, or using __cpuid() to detect support at
+// runtime and falling back to a non-SSSE3 implementation when SSSE3 is
+// unsupported by the CPU.
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set
+#elif defined(__SSSE3__)
+#define ABSL_INTERNAL_HAVE_SSSE3 1
+#endif
+
+// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM
+// SIMD).
+//
+// If __CUDA_ARCH__ is defined, then we are compiling CUDA code in device mode.
+// In device mode, NEON intrinsics are not available, regardless of host
+// platform.
+// https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
+#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__)
+#define ABSL_INTERNAL_HAVE_ARM_NEON 1
+#endif
+
+// ABSL_HAVE_CONSTANT_EVALUATED is used for compile-time detection of
+// constant evaluation support through `absl::is_constant_evaluated`.
+#ifdef ABSL_HAVE_CONSTANT_EVALUATED
+#error ABSL_HAVE_CONSTANT_EVALUATED cannot be directly set
+#endif
+#ifdef __cpp_lib_is_constant_evaluated
+#define ABSL_HAVE_CONSTANT_EVALUATED 1
+#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
+#define ABSL_HAVE_CONSTANT_EVALUATED 1
+#endif
+
+// ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
+// into an integer that can be compared against.
+#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#error ABSL_INTERNAL_EMSCRIPTEN_VERSION cannot be directly set
+#endif
+#ifdef __EMSCRIPTEN__
+#include <emscripten/version.h>
+#ifdef __EMSCRIPTEN_major__
+#if __EMSCRIPTEN_minor__ >= 1000
+#error __EMSCRIPTEN_minor__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#if __EMSCRIPTEN_tiny__ >= 1000
+#error __EMSCRIPTEN_tiny__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#define ABSL_INTERNAL_EMSCRIPTEN_VERSION                          \
+  ((__EMSCRIPTEN_major__) * 1000000 + (__EMSCRIPTEN_minor__) * 1000 + \
+   (__EMSCRIPTEN_tiny__))
+#endif
+#endif
+
+#endif  // ABSL_BASE_CONFIG_H_
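For illustration only (client code should not use `ABSL_INTERNAL_*` macros, as their naming warns; they are shown here purely to make the detection pattern concrete), here is how this kind of compile-time SIMD detection typically composes. The `Sum4` helper is hypothetical:

```cpp
#include "absl/base/config.h"

#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>  // SSE2 intrinsics (pulls in SSE as well)
#endif

// Horizontal sum of four floats, vectorized when SSE2 is compiled in.
float Sum4(const float v[4]) {
#ifdef ABSL_INTERNAL_HAVE_SSE2
  __m128 x = _mm_loadu_ps(v);
  x = _mm_add_ps(x, _mm_movehl_ps(x, x));        // {a+c, b+d, ...}
  x = _mm_add_ss(x, _mm_shuffle_ps(x, x, 0x1));  // lane 0 = a+c+b+d
  return _mm_cvtss_f32(x);
#else
  return v[0] + v[1] + v[2] + v[3];
#endif
}
```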
+ +#include "absl/base/config.h" + +#include + +#include "gtest/gtest.h" +#include "absl/synchronization/internal/thread_pool.h" + +namespace { + +TEST(ConfigTest, Endianness) { + union { + uint32_t value; + uint8_t data[sizeof(uint32_t)]; + } number; + number.data[0] = 0x00; + number.data[1] = 0x01; + number.data[2] = 0x02; + number.data[3] = 0x03; +#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN) +#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined +#elif defined(ABSL_IS_LITTLE_ENDIAN) + EXPECT_EQ(UINT32_C(0x03020100), number.value); +#elif defined(ABSL_IS_BIG_ENDIAN) + EXPECT_EQ(UINT32_C(0x00010203), number.value); +#else +#error Unknown endianness +#endif +} + +#if defined(ABSL_HAVE_THREAD_LOCAL) +TEST(ConfigTest, ThreadLocal) { + static thread_local int mine_mine_mine = 16; + EXPECT_EQ(16, mine_mine_mine); + { + absl::synchronization_internal::ThreadPool pool(1); + pool.Schedule([&] { + EXPECT_EQ(16, mine_mine_mine); + mine_mine_mine = 32; + EXPECT_EQ(32, mine_mine_mine); + }); + } + EXPECT_EQ(16, mine_mine_mine); +} +#endif + +} // namespace diff --git a/third_party/absl/absl/base/const_init.h b/third_party/absl/absl/base/const_init.h new file mode 100644 index 000000000..16520b61d --- /dev/null +++ b/third_party/absl/absl/base/const_init.h @@ -0,0 +1,76 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// kConstInit +// ----------------------------------------------------------------------------- +// +// A constructor tag used to mark an object as safe for use as a global +// variable, avoiding the usual lifetime issues that can affect globals. + +#ifndef ABSL_BASE_CONST_INIT_H_ +#define ABSL_BASE_CONST_INIT_H_ + +#include "absl/base/config.h" + +// In general, objects with static storage duration (such as global variables) +// can trigger tricky object lifetime situations. Attempting to access them +// from the constructors or destructors of other global objects can result in +// undefined behavior, unless their constructors and destructors are designed +// with this issue in mind. +// +// The normal way to deal with this issue in C++11 is to use constant +// initialization and trivial destructors. +// +// Constant initialization is guaranteed to occur before any other code +// executes. Constructors that are declared 'constexpr' are eligible for +// constant initialization. You can annotate a variable declaration with the +// ABSL_CONST_INIT macro to express this intent. For compilers that support +// it, this annotation will cause a compilation error for declarations that +// aren't subject to constant initialization (perhaps because a runtime value +// was passed as a constructor argument). +// +// On program shutdown, lifetime issues can be avoided on global objects by +// ensuring that they contain trivial destructors. 
A class has a trivial +// destructor unless it has a user-defined destructor, a virtual method or base +// class, or a data member or base class with a non-trivial destructor of its +// own. Objects with static storage duration and a trivial destructor are not +// cleaned up on program shutdown, and are thus safe to access from other code +// running during shutdown. +// +// For a few core Abseil classes, we make a best effort to allow for safe global +// instances, even though these classes have non-trivial destructors. These +// objects can be created with the absl::kConstInit tag. For example: +// ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit); +// +// The line above declares a global variable of type absl::Mutex which can be +// accessed at any point during startup or shutdown. global_mutex's destructor +// will still run, but will not invalidate the object. Note that C++ specifies +// that accessing an object after its destructor has run results in undefined +// behavior, but this pattern works on the toolchains we support. +// +// The absl::kConstInit tag should only be used to define objects with static +// or thread_local storage duration. + +namespace absl { +ABSL_NAMESPACE_BEGIN + +enum ConstInitType { + kConstInit, +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CONST_INIT_H_ diff --git a/third_party/absl/absl/base/dynamic_annotations.h b/third_party/absl/absl/base/dynamic_annotations.h new file mode 100644 index 000000000..7ba8912ea --- /dev/null +++ b/third_party/absl/absl/base/dynamic_annotations.h @@ -0,0 +1,496 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines dynamic annotations for use with dynamic analysis tool +// such as valgrind, PIN, etc. +// +// Dynamic annotation is a source code annotation that affects the generated +// code (that is, the annotation is not a comment). Each such annotation is +// attached to a particular instruction and/or to a particular object (address) +// in the program. +// +// The annotations that should be used by users are macros in all upper-case +// (e.g., ABSL_ANNOTATE_THREAD_NAME). +// +// Actual implementation of these macros may differ depending on the dynamic +// analysis tool being used. +// +// This file supports the following configurations: +// - Dynamic Annotations enabled (with static thread-safety warnings disabled). +// In this case, macros expand to functions implemented by Thread Sanitizer, +// when building with TSan. When not provided an external implementation, +// dynamic_annotations.cc provides no-op implementations. +// +// - Static Clang thread-safety warnings enabled. +// When building with a Clang compiler that supports thread-safety warnings, +// a subset of annotations can be statically-checked at compile-time. We +// expand these macros to static-inline functions that can be analyzed for +// thread-safety, but afterwards elided when building the final binary. +// +// - All annotations are disabled. 
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#ifdef __cplusplus
+#include "absl/base/macros.h"
+#endif
+
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#include <sanitizer/hwasan_interface.h>
+#endif
+
+// TODO(rogeeff): Remove after the backward compatibility period.
+#include "absl/base/internal/dynamic_annotations.h"  // IWYU pragma: export
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled.
+
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning, if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+#if defined(__clang__)
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1
+#if !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+#else
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#endif
+
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+  ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+
+#endif  // ABSL_HAVE_THREAD_SANITIZER
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C }  // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C  // empty
+#define ABSL_INTERNAL_END_EXTERN_C    // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC.
+#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ABSL_ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. +#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. +#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var##_annotator { \ + public: \ + static_var##_annotator() { \ + ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. 
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateRWLockCreate(const char* file, int line,
+                          const volatile void* lock);
+void AnnotateRWLockCreateStatic(const char* file, int line,
+                                const volatile void* lock);
+void AnnotateRWLockDestroy(const char* file, int line,
+                           const volatile void* lock);
+void AnnotateRWLockAcquired(const char* file, int line,
+                            const volatile void* lock, long is_w);  // NOLINT
+void AnnotateRWLockReleased(const char* file, int line,
+                            const volatile void* lock, long is_w);  // NOLINT
+void AnnotateBenignRace(const char* file, int line,
+                        const volatile void* address, const char* description);
+void AnnotateBenignRaceSized(const char* file, int line,
+                             const volatile void* address, size_t size,
+                             const char* description);
+void AnnotateThreadName(const char* file, int line, const char* name);
+void AnnotateEnableRaceDetection(const char* file, int line, int enable);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else  // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock)                            // empty
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock)                     // empty
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock)                           // empty
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                    // empty
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w)                    // empty
+#define ABSL_ANNOTATE_BENIGN_RACE(address, description)              // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description)  // empty
+#define ABSL_ANNOTATE_THREAD_NAME(name)                              // empty
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable)                  // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description)    // empty
+
+#endif  // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+  __msan_unpoison(address, size)
+
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+  __msan_allocated_memory(address, size)
+
+#else  // !defined(ABSL_HAVE_MEMORY_SANITIZER)
+
+// TODO(rogeeff): remove this branch
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+  do {                                                     \
+    (void)(address);                                       \
+    (void)(size);                                          \
+  } while (0)
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+  do {                                                       \
+    (void)(address);                                         \
+    (void)(size);                                            \
+  } while (0)
+#else
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size)    // empty
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size)  // empty
+
+#endif
+
+#endif  // ABSL_HAVE_MEMORY_SANITIZER
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+  __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+  __attribute((unlock_function("*")))
+
+#else  // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE  // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE    // empty
+
+#endif  // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g.
AnnotateIgnoreReadsBegin) are +// defined by the compiler-based implementation, not by the Abseil +// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. + +// Request the analysis tool to ignore all reads in the current thread until +// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ABSL_ANNOTATE_UNPROTECTED_READ. +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ + (__FILE__, __LINE__) + +// Stop ignoring reads. +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreReadsBegin(const char* file, int line) + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE; +void AnnotateIgnoreReadsEnd(const char* file, + int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE; +ABSL_INTERNAL_END_EXTERN_C + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ + () + +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ + () + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsBegin)() + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsEnd)() + ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ABSL_ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreWritesBegin(const char* file, int line); +void AnnotateIgnoreWritesEnd(const char* file, int line); +ABSL_INTERNAL_END_EXTERN_C + +#else + +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ABSL_ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ABSL_ANNOTATE_IGNORE_READS_END(); +// one can use +// ... 
= ABSL_ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+  do {                                                \
+    ABSL_ANNOTATE_IGNORE_READS_BEGIN();               \
+    ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();              \
+  } while (0)
+
+// Stop ignoring both reads and writes.
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+  do {                                              \
+    ABSL_ANNOTATE_IGNORE_WRITES_END();              \
+    ABSL_ANNOTATE_IGNORE_READS_END();               \
+  } while (0)
+
+#ifdef __cplusplus
+// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \
+  absl::base_internal::AnnotateUnprotectedRead(x)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+inline T AnnotateUnprotectedRead(const volatile T& x) {  // NOLINT
+  ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+  T res = x;
+  ABSL_ANNOTATE_IGNORE_READS_END();
+  return res;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+#endif
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN()  // empty
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END()    // empty
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or std::string. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+  __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \
+  struct {                                   \
+    alignas(8) char x[8];                    \
+  } name
+
+#else
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)  // empty
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// HWAddress sanitizer annotations
+
+#ifdef __cplusplus
+namespace absl {
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+// Under HWASAN, changes the tag of the pointer.
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t tag) {
+  return reinterpret_cast<T*>(__hwasan_tag_pointer(ptr, tag));
+}
+#else
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t) {
+  return ptr;
+}
+#endif
+}  // namespace absl
+#endif
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif  // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
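To ground the annotation macros just defined, here is a small sketch of the racy-read helper in use. The macro is the one defined above; the counter scenario and names are illustrative:

```cpp
#include "absl/base/dynamic_annotations.h"

// A deliberately unsynchronized counter: writers may race, and readers only
// want a fuzzy snapshot. The annotation tells a race detector (e.g. TSan)
// that this read is intentional rather than a bug.
extern int g_approximate_hits;

int ApproximateHits() {
  return ABSL_ANNOTATE_UNPROTECTED_READ(g_approximate_hits);
}
```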
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exception_safety_testing.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <exception>
+#include <iostream>
+#include <list>
+#include <type_traits>
+#include <vector>
+
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+
+namespace testing {
+
+namespace {
+
+using ::testing::exceptions_internal::SetCountdown;
+using ::testing::exceptions_internal::TestException;
+using ::testing::exceptions_internal::UnsetCountdown;
+
+// EXPECT_NO_THROW can't inspect the thrown exception in general.
+template <typename F>
+void ExpectNoThrow(const F& f) {
+  try {
+    f();
+  } catch (const TestException& e) {
+    ADD_FAILURE() << "Unexpected exception thrown from " << e.what();
+  }
+}
+
+TEST(ThrowingValueTest, Throws) {
+  SetCountdown();
+  EXPECT_THROW(ThrowingValue<> bomb, TestException);
+
+  // It's not guaranteed that every operator only throws *once*. The default
+  // ctor only throws once, though, so use it to make sure we only throw when
+  // the countdown hits 0.
+  SetCountdown(2);
+  ExpectNoThrow([]() { ThrowingValue<> bomb; });
+  ExpectNoThrow([]() { ThrowingValue<> bomb; });
+  EXPECT_THROW(ThrowingValue<> bomb, TestException);
+
+  UnsetCountdown();
+}
+
+// Tests that an operation throws when the countdown is at 0, doesn't throw
+// when the countdown doesn't hit 0, and doesn't modify the state of the
+// ThrowingValue if it throws.
+template <typename F>
+void TestOp(const F& f) {
+  ExpectNoThrow(f);
+
+  SetCountdown();
+  EXPECT_THROW(f(), TestException);
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingCtors) {
+  ThrowingValue<> bomb;
+
+  TestOp([]() { ThrowingValue<> bomb(1); });
+  TestOp([&]() { ThrowingValue<> bomb1 = bomb; });
+  TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); });
+}
+
+TEST(ThrowingValueTest, ThrowingAssignment) {
+  ThrowingValue<> bomb, bomb1;
+
+  TestOp([&]() { bomb = bomb1; });
+  TestOp([&]() { bomb = std::move(bomb1); });
+
+  // Test that when assignment throws, the assignment should fail (lhs != rhs)
+  // and strong guarantee fails (lhs != lhs_copy).
+ { + ThrowingValue<> lhs(39), rhs(42); + ThrowingValue<> lhs_copy(lhs); + SetCountdown(); + EXPECT_THROW(lhs = rhs, TestException); + UnsetCountdown(); + EXPECT_NE(lhs, rhs); + EXPECT_NE(lhs_copy, lhs); + } + { + ThrowingValue<> lhs(39), rhs(42); + ThrowingValue<> lhs_copy(lhs), rhs_copy(rhs); + SetCountdown(); + EXPECT_THROW(lhs = std::move(rhs), TestException); + UnsetCountdown(); + EXPECT_NE(lhs, rhs_copy); + EXPECT_NE(lhs_copy, lhs); + } +} + +TEST(ThrowingValueTest, ThrowingComparisons) { + ThrowingValue<> bomb1, bomb2; + TestOp([&]() { return bomb1 == bomb2; }); + TestOp([&]() { return bomb1 != bomb2; }); + TestOp([&]() { return bomb1 < bomb2; }); + TestOp([&]() { return bomb1 <= bomb2; }); + TestOp([&]() { return bomb1 > bomb2; }); + TestOp([&]() { return bomb1 >= bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingArithmeticOps) { + ThrowingValue<> bomb1(1), bomb2(2); + + TestOp([&bomb1]() { +bomb1; }); + TestOp([&bomb1]() { -bomb1; }); + TestOp([&bomb1]() { ++bomb1; }); + TestOp([&bomb1]() { bomb1++; }); + TestOp([&bomb1]() { --bomb1; }); + TestOp([&bomb1]() { bomb1--; }); + + TestOp([&]() { bomb1 + bomb2; }); + TestOp([&]() { bomb1 - bomb2; }); + TestOp([&]() { bomb1* bomb2; }); + TestOp([&]() { bomb1 / bomb2; }); + TestOp([&]() { bomb1 << 1; }); + TestOp([&]() { bomb1 >> 1; }); +} + +TEST(ThrowingValueTest, ThrowingLogicalOps) { + ThrowingValue<> bomb1, bomb2; + + TestOp([&bomb1]() { !bomb1; }); + TestOp([&]() { bomb1&& bomb2; }); + TestOp([&]() { bomb1 || bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingBitwiseOps) { + ThrowingValue<> bomb1, bomb2; + + TestOp([&bomb1]() { ~bomb1; }); + TestOp([&]() { bomb1 & bomb2; }); + TestOp([&]() { bomb1 | bomb2; }); + TestOp([&]() { bomb1 ^ bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingCompoundAssignmentOps) { + ThrowingValue<> bomb1(1), bomb2(2); + + TestOp([&]() { bomb1 += bomb2; }); + TestOp([&]() { bomb1 -= bomb2; }); + TestOp([&]() { bomb1 *= bomb2; }); + TestOp([&]() { bomb1 /= bomb2; }); + TestOp([&]() { bomb1 %= bomb2; }); + TestOp([&]() { bomb1 &= bomb2; }); + TestOp([&]() { bomb1 |= bomb2; }); + TestOp([&]() { bomb1 ^= bomb2; }); + TestOp([&]() { bomb1 *= bomb2; }); +} + +TEST(ThrowingValueTest, ThrowingStreamOps) { + ThrowingValue<> bomb; + + TestOp([&]() { + std::istringstream stream; + stream >> bomb; + }); + TestOp([&]() { + std::stringstream stream; + stream << bomb; + }); +} + +// Tests the operator<< of ThrowingValue by forcing ConstructorTracker to emit +// a nonfatal failure that contains the string representation of the Thrower +TEST(ThrowingValueTest, StreamOpsOutput) { + using ::testing::TypeSpec; + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); + + // Test default spec list (kEverythingThrows) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue; + auto thrower = Thrower(123); + thrower.~Thrower(); + }, + "ThrowingValue<>(123)"); + + // Test with one item in spec list (kNoThrowCopy) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue; + auto thrower = Thrower(234); + thrower.~Thrower(); + }, + "ThrowingValue(234)"); + + // Test with multiple items in spec list (kNoThrowMove, kNoThrowNew) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = + ThrowingValue; + auto thrower = Thrower(345); + thrower.~Thrower(); + }, + "ThrowingValue(345)"); + + // Test with all items in spec list (kNoThrowCopy, kNoThrowMove, kNoThrowNew) + EXPECT_NONFATAL_FAILURE( + { + using Thrower = ThrowingValue(-1)>; + auto thrower = Thrower(456); + thrower.~Thrower(); + }, + "ThrowingValue(456)"); +} 
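+
+// The tests above all drive the same countdown protocol (a rough sketch of
+// the machinery in exception_safety_testing.h, with illustrative values):
+// SetCountdown(n) arms a counter that every potentially-throwing operation
+// decrements, and the operation that observes zero throws TestException.
+// Roughly:
+//
+//   SetCountdown(2);
+//   ThrowingValue<> a;  // counter 2 -> 1, no throw
+//   ThrowingValue<> b;  // counter 1 -> 0, no throw
+//   ThrowingValue<> c;  // counter at 0 -> throws TestException
+//   UnsetCountdown();   // disarm so unrelated code can't throw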
+ +template +void TestAllocatingOp(const F& f) { + ExpectNoThrow(f); + + SetCountdown(); + EXPECT_THROW(f(), exceptions_internal::TestBadAllocException); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingAllocatingOps) { + // make_unique calls unqualified operator new, so these exercise the + // ThrowingValue overloads. + TestAllocatingOp([]() { return absl::make_unique>(1); }); + TestAllocatingOp([]() { return absl::make_unique[]>(2); }); +} + +TEST(ThrowingValueTest, NonThrowingMoveCtor) { + ThrowingValue nothrow_ctor; + + SetCountdown(); + ExpectNoThrow([¬hrow_ctor]() { + ThrowingValue nothrow1 = std::move(nothrow_ctor); + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingMoveAssign) { + ThrowingValue nothrow_assign1, nothrow_assign2; + + SetCountdown(); + ExpectNoThrow([¬hrow_assign1, ¬hrow_assign2]() { + nothrow_assign1 = std::move(nothrow_assign2); + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingCopyCtor) { + ThrowingValue<> tv; + + TestOp([&]() { ThrowingValue<> tv_copy(tv); }); +} + +TEST(ThrowingValueTest, ThrowingCopyAssign) { + ThrowingValue<> tv1, tv2; + + TestOp([&]() { tv1 = tv2; }); +} + +TEST(ThrowingValueTest, NonThrowingCopyCtor) { + ThrowingValue nothrow_ctor; + + SetCountdown(); + ExpectNoThrow([¬hrow_ctor]() { + ThrowingValue nothrow1(nothrow_ctor); + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingCopyAssign) { + ThrowingValue nothrow_assign1, nothrow_assign2; + + SetCountdown(); + ExpectNoThrow([¬hrow_assign1, ¬hrow_assign2]() { + nothrow_assign1 = nothrow_assign2; + }); + UnsetCountdown(); +} + +TEST(ThrowingValueTest, ThrowingSwap) { + ThrowingValue<> bomb1, bomb2; + TestOp([&]() { std::swap(bomb1, bomb2); }); +} + +TEST(ThrowingValueTest, NonThrowingSwap) { + ThrowingValue bomb1, bomb2; + ExpectNoThrow([&]() { std::swap(bomb1, bomb2); }); +} + +TEST(ThrowingValueTest, NonThrowingAllocation) { + ThrowingValue* allocated; + ThrowingValue* array; + + ExpectNoThrow([&allocated]() { + allocated = new ThrowingValue(1); + delete allocated; + }); + ExpectNoThrow([&array]() { + array = new ThrowingValue[2]; + delete[] array; + }); +} + +TEST(ThrowingValueTest, NonThrowingDelete) { + auto* allocated = new ThrowingValue<>(1); + auto* array = new ThrowingValue<>[2]; + + SetCountdown(); + ExpectNoThrow([allocated]() { delete allocated; }); + SetCountdown(); + ExpectNoThrow([array]() { delete[] array; }); + + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingPlacementDelete) { + constexpr int kArrayLen = 2; + // We intentionally create extra space to store the tag allocated by placement + // new[]. 
+ constexpr size_t kExtraSpaceLen = sizeof(size_t) * 2; + + alignas(ThrowingValue<>) unsigned char buf[sizeof(ThrowingValue<>)]; + alignas(ThrowingValue<>) unsigned char + array_buf[kExtraSpaceLen + sizeof(ThrowingValue<>[kArrayLen])]; + auto* placed = new (&buf) ThrowingValue<>(1); + auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen]; + auto* placed_array_end = reinterpret_cast(placed_array) + + sizeof(ThrowingValue<>[kArrayLen]); + EXPECT_LE(placed_array_end, array_buf + sizeof(array_buf)); + + SetCountdown(); + ExpectNoThrow([placed, &buf]() { + placed->~ThrowingValue<>(); + ThrowingValue<>::operator delete(placed, &buf); + }); + + SetCountdown(); + ExpectNoThrow([&, placed_array]() { + for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>(); + ThrowingValue<>::operator delete[](placed_array, &array_buf); + }); + + UnsetCountdown(); +} + +TEST(ThrowingValueTest, NonThrowingDestructor) { + auto* allocated = new ThrowingValue<>(); + + SetCountdown(); + ExpectNoThrow([allocated]() { delete allocated; }); + UnsetCountdown(); +} + +TEST(ThrowingBoolTest, ThrowingBool) { + ThrowingBool t = true; + + // Test that it's contextually convertible to bool + if (t) { // NOLINT(whitespace/empty_if_body) + } + EXPECT_TRUE(t); + + TestOp([&]() { (void)!t; }); +} + +TEST(ThrowingAllocatorTest, MemoryManagement) { + // Just exercise the memory management capabilities under LSan to make sure we + // don't leak. + ThrowingAllocator int_alloc; + int* ip = int_alloc.allocate(1); + int_alloc.deallocate(ip, 1); + int* i_array = int_alloc.allocate(2); + int_alloc.deallocate(i_array, 2); + + ThrowingAllocator> tv_alloc; + ThrowingValue<>* ptr = tv_alloc.allocate(1); + tv_alloc.deallocate(ptr, 1); + ThrowingValue<>* tv_array = tv_alloc.allocate(2); + tv_alloc.deallocate(tv_array, 2); +} + +TEST(ThrowingAllocatorTest, CallsGlobalNew) { + ThrowingAllocator, AllocSpec::kNoThrowAllocate> nothrow_alloc; + ThrowingValue<>* ptr; + + SetCountdown(); + // This will only throw if ThrowingValue::new is called. 
+ ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); }); + nothrow_alloc.deallocate(ptr, 1); + + UnsetCountdown(); +} + +TEST(ThrowingAllocatorTest, ThrowingConstructors) { + ThrowingAllocator int_alloc; + int* ip = nullptr; + + SetCountdown(); + EXPECT_THROW(ip = int_alloc.allocate(1), TestException); + ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); + + *ip = 1; + SetCountdown(); + EXPECT_THROW(int_alloc.construct(ip, 2), TestException); + EXPECT_EQ(*ip, 1); + int_alloc.deallocate(ip, 1); + + UnsetCountdown(); +} + +TEST(ThrowingAllocatorTest, NonThrowingConstruction) { + { + ThrowingAllocator int_alloc; + int* ip = nullptr; + + SetCountdown(); + ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); + + SetCountdown(); + ExpectNoThrow([&]() { int_alloc.construct(ip, 2); }); + + EXPECT_EQ(*ip, 2); + int_alloc.deallocate(ip, 1); + + UnsetCountdown(); + } + + { + ThrowingAllocator int_alloc; + int* ip = nullptr; + ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); + ExpectNoThrow([&]() { int_alloc.construct(ip, 2); }); + EXPECT_EQ(*ip, 2); + int_alloc.deallocate(ip, 1); + } + + { + ThrowingAllocator, AllocSpec::kNoThrowAllocate> + nothrow_alloc; + ThrowingValue<>* ptr; + + SetCountdown(); + ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); }); + + SetCountdown(); + ExpectNoThrow( + [&]() { nothrow_alloc.construct(ptr, 2, testing::nothrow_ctor); }); + + EXPECT_EQ(ptr->Get(), 2); + nothrow_alloc.destroy(ptr); + nothrow_alloc.deallocate(ptr, 1); + + UnsetCountdown(); + } + + { + ThrowingAllocator a; + + SetCountdown(); + ExpectNoThrow([&]() { ThrowingAllocator a1 = a; }); + + SetCountdown(); + ExpectNoThrow([&]() { ThrowingAllocator a1 = std::move(a); }); + + UnsetCountdown(); + } +} + +TEST(ThrowingAllocatorTest, ThrowingAllocatorConstruction) { + ThrowingAllocator a; + TestOp([]() { ThrowingAllocator a; }); + TestOp([&]() { a.select_on_container_copy_construction(); }); +} + +TEST(ThrowingAllocatorTest, State) { + ThrowingAllocator a1, a2; + EXPECT_NE(a1, a2); + + auto a3 = a1; + EXPECT_EQ(a3, a1); + int* ip = a1.allocate(1); + EXPECT_EQ(a3, a1); + a3.deallocate(ip, 1); + EXPECT_EQ(a3, a1); +} + +TEST(ThrowingAllocatorTest, InVector) { + std::vector, ThrowingAllocator>> v; + for (int i = 0; i < 20; ++i) v.push_back({}); + for (int i = 0; i < 20; ++i) v.pop_back(); +} + +TEST(ThrowingAllocatorTest, InList) { + std::list, ThrowingAllocator>> l; + for (int i = 0; i < 20; ++i) l.push_back({}); + for (int i = 0; i < 20; ++i) l.pop_back(); + for (int i = 0; i < 20; ++i) l.push_front({}); + for (int i = 0; i < 20; ++i) l.pop_front(); +} + +template +struct NullaryTestValidator : public std::false_type {}; + +template +struct NullaryTestValidator< + TesterInstance, + absl::void_t().Test())>> + : public std::true_type {}; + +template +bool HasNullaryTest(const TesterInstance&) { + return NullaryTestValidator::value; +} + +void DummyOp(void*) {} + +template +struct UnaryTestValidator : public std::false_type {}; + +template +struct UnaryTestValidator< + TesterInstance, + absl::void_t().Test(DummyOp))>> + : public std::true_type {}; + +template +bool HasUnaryTest(const TesterInstance&) { + return UnaryTestValidator::value; +} + +TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) { + using T = exceptions_internal::UninitializedT; + auto op = [](T* t) {}; + auto inv = [](T*) { return testing::AssertionSuccess(); }; + auto fac = []() { return absl::make_unique(); }; + + // Test that providing operation and inveriants still does not allow for the + // the invocation of .Test() 
and .Test(op) because it lacks a factory + auto without_fac = + testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts( + inv, testing::strong_guarantee); + EXPECT_FALSE(HasNullaryTest(without_fac)); + EXPECT_FALSE(HasUnaryTest(without_fac)); + + // Test that providing contracts and factory allows the invocation of + // .Test(op) but does not allow for .Test() because it lacks an operation + auto without_op = testing::MakeExceptionSafetyTester() + .WithContracts(inv, testing::strong_guarantee) + .WithFactory(fac); + EXPECT_FALSE(HasNullaryTest(without_op)); + EXPECT_TRUE(HasUnaryTest(without_op)); + + // Test that providing operation and factory still does not allow for the + // the invocation of .Test() and .Test(op) because it lacks contracts + auto without_inv = + testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac); + EXPECT_FALSE(HasNullaryTest(without_inv)); + EXPECT_FALSE(HasUnaryTest(without_inv)); +} + +struct ExampleStruct {}; + +std::unique_ptr ExampleFunctionFactory() { + return absl::make_unique(); +} + +void ExampleFunctionOperation(ExampleStruct*) {} + +testing::AssertionResult ExampleFunctionContract(ExampleStruct*) { + return testing::AssertionSuccess(); +} + +struct { + std::unique_ptr operator()() const { + return ExampleFunctionFactory(); + } +} example_struct_factory; + +struct { + void operator()(ExampleStruct*) const {} +} example_struct_operation; + +struct { + testing::AssertionResult operator()(ExampleStruct* example_struct) const { + return ExampleFunctionContract(example_struct); + } +} example_struct_contract; + +auto example_lambda_factory = []() { return ExampleFunctionFactory(); }; + +auto example_lambda_operation = [](ExampleStruct*) {}; + +auto example_lambda_contract = [](ExampleStruct* example_struct) { + return ExampleFunctionContract(example_struct); +}; + +// Testing that function references, pointers, structs with operator() and +// lambdas can all be used with ExceptionSafetyTester +TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) { + // function reference + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(ExampleFunctionFactory) + .WithOperation(ExampleFunctionOperation) + .WithContracts(ExampleFunctionContract) + .Test()); + + // function pointer + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(&ExampleFunctionFactory) + .WithOperation(&ExampleFunctionOperation) + .WithContracts(&ExampleFunctionContract) + .Test()); + + // struct + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(example_struct_factory) + .WithOperation(example_struct_operation) + .WithContracts(example_struct_contract) + .Test()); + + // lambda + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithFactory(example_lambda_factory) + .WithOperation(example_lambda_operation) + .WithContracts(example_lambda_contract) + .Test()); +} + +struct NonNegative { + bool operator==(const NonNegative& other) const { return i == other.i; } + int i; +}; + +testing::AssertionResult CheckNonNegativeInvariants(NonNegative* g) { + if (g->i >= 0) { + return testing::AssertionSuccess(); + } + return testing::AssertionFailure() + << "i should be non-negative but is " << g->i; +} + +struct { + template + void operator()(T* t) const { + (*t)(); + } +} invoker; + +auto tester = + testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts( + CheckNonNegativeInvariants); +auto strong_tester = tester.WithContracts(testing::strong_guarantee); + +struct FailsBasicGuarantee : public NonNegative { + void 
operator()() { + --i; + ThrowingValue<> bomb; + ++i; + } +}; + +TEST(ExceptionCheckTest, BasicGuaranteeFailure) { + EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test()); +} + +struct FollowsBasicGuarantee : public NonNegative { + void operator()() { + ++i; + ThrowingValue<> bomb; + } +}; + +TEST(ExceptionCheckTest, BasicGuarantee) { + EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); +} + +TEST(ExceptionCheckTest, StrongGuaranteeFailure) { + EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test()); + EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); +} + +struct BasicGuaranteeWithExtraContracts : public NonNegative { + // After operator(), i is incremented. If operator() throws, i is set to 9999 + void operator()() { + int old_i = i; + i = kExceptionSentinel; + ThrowingValue<> bomb; + i = ++old_i; + } + + static constexpr int kExceptionSentinel = 9999; +}; + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel; +#endif + +TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) { + auto tester_with_val = + tester.WithInitialValue(BasicGuaranteeWithExtraContracts{}); + EXPECT_TRUE(tester_with_val.Test()); + EXPECT_TRUE( + tester_with_val + .WithContracts([](BasicGuaranteeWithExtraContracts* o) { + if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) { + return testing::AssertionSuccess(); + } + return testing::AssertionFailure() + << "i should be " + << BasicGuaranteeWithExtraContracts::kExceptionSentinel + << ", but is " << o->i; + }) + .Test()); +} + +struct FollowsStrongGuarantee : public NonNegative { + void operator()() { ThrowingValue<> bomb; } +}; + +TEST(ExceptionCheckTest, StrongGuarantee) { + EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); + EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); +} + +struct HasReset : public NonNegative { + void operator()() { + i = -1; + ThrowingValue<> bomb; + i = 1; + } + + void reset() { i = 0; } +}; + +testing::AssertionResult CheckHasResetContracts(HasReset* h) { + h->reset(); + return testing::AssertionResult(h->i == 0); +} + +TEST(ExceptionCheckTest, ModifyingChecker) { + auto set_to_1000 = [](FollowsBasicGuarantee* g) { + g->i = 1000; + return testing::AssertionSuccess(); + }; + auto is_1000 = [](FollowsBasicGuarantee* g) { + return testing::AssertionResult(g->i == 1000); + }; + auto increment = [](FollowsStrongGuarantee* g) { + ++g->i; + return testing::AssertionSuccess(); + }; + + EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{}) + .WithContracts(set_to_1000, is_1000) + .Test()); + EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}) + .WithContracts(increment) + .Test()); + EXPECT_TRUE(testing::MakeExceptionSafetyTester() + .WithInitialValue(HasReset{}) + .WithContracts(CheckHasResetContracts) + .Test(invoker)); +} + +TEST(ExceptionSafetyTesterTest, ResetsCountdown) { + auto test = + testing::MakeExceptionSafetyTester() + .WithInitialValue(ThrowingValue<>()) + .WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); }) + .WithOperation([](ThrowingValue<>*) {}); + ASSERT_TRUE(test.Test()); + // If the countdown isn't reset because there were no exceptions thrown, then + // this will fail with a termination from an unhandled exception + EXPECT_TRUE(test.Test()); +} + +struct NonCopyable : public NonNegative { + NonCopyable(const NonCopyable&) = delete; + NonCopyable() : NonNegative{0} {} + + void 
operator()() { ThrowingValue<> bomb; } +}; + +TEST(ExceptionCheckTest, NonCopyable) { + auto factory = []() { return absl::make_unique(); }; + EXPECT_TRUE(tester.WithFactory(factory).Test()); + EXPECT_TRUE(strong_tester.WithFactory(factory).Test()); +} + +struct NonEqualityComparable : public NonNegative { + void operator()() { ThrowingValue<> bomb; } + + void ModifyOnThrow() { + ++i; + ThrowingValue<> bomb; + static_cast(bomb); + --i; + } +}; + +TEST(ExceptionCheckTest, NonEqualityComparable) { + auto nec_is_strong = [](NonEqualityComparable* nec) { + return testing::AssertionResult(nec->i == NonEqualityComparable().i); + }; + auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{}) + .WithContracts(nec_is_strong); + + EXPECT_TRUE(strong_nec_tester.Test()); + EXPECT_FALSE(strong_nec_tester.Test( + [](NonEqualityComparable* n) { n->ModifyOnThrow(); })); +} + +template +struct ExhaustivenessTester { + void operator()() { + successes |= 1; + T b1; + static_cast(b1); + successes |= (1 << 1); + T b2; + static_cast(b2); + successes |= (1 << 2); + T b3; + static_cast(b3); + successes |= (1 << 3); + } + + bool operator==(const ExhaustivenessTester>&) const { + return true; + } + + static unsigned char successes; +}; + +struct { + template + testing::AssertionResult operator()(ExhaustivenessTester*) const { + return testing::AssertionSuccess(); + } +} CheckExhaustivenessTesterContracts; + +template +unsigned char ExhaustivenessTester::successes = 0; + +TEST(ExceptionCheckTest, Exhaustiveness) { + auto exhaust_tester = testing::MakeExceptionSafetyTester() + .WithContracts(CheckExhaustivenessTesterContracts) + .WithOperation(invoker); + + EXPECT_TRUE( + exhaust_tester.WithInitialValue(ExhaustivenessTester{}).Test()); + EXPECT_EQ(ExhaustivenessTester::successes, 0xF); + + EXPECT_TRUE( + exhaust_tester.WithInitialValue(ExhaustivenessTester>{}) + .WithContracts(testing::strong_guarantee) + .Test()); + EXPECT_EQ(ExhaustivenessTester>::successes, 0xF); +} + +struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject { + LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) { + ++counter; + ThrowingValue<> v; + static_cast(v); + --counter; + } + LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept + : TrackedObject(ABSL_PRETTY_FUNCTION) {} + static int counter; +}; +int LeaksIfCtorThrows::counter = 0; + +TEST(ExceptionCheckTest, TestLeakyCtor) { + testing::TestThrowingCtor(); + EXPECT_EQ(LeaksIfCtorThrows::counter, 1); + LeaksIfCtorThrows::counter = 0; +} + +struct Tracked : private exceptions_internal::TrackedObject { + Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {} +}; + +TEST(ConstructorTrackerTest, CreatedBefore) { + Tracked a, b, c; + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); +} + +TEST(ConstructorTrackerTest, CreatedAfter) { + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); + Tracked a, b, c; +} + +TEST(ConstructorTrackerTest, NotDestroyedAfter) { + alignas(Tracked) unsigned char storage[sizeof(Tracked)]; + EXPECT_NONFATAL_FAILURE( + { + exceptions_internal::ConstructorTracker ct( + exceptions_internal::countdown); + new (&storage) Tracked(); + }, + "not destroyed"); +} + +TEST(ConstructorTrackerTest, DestroyedTwice) { + exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); + EXPECT_NONFATAL_FAILURE( + { + Tracked t; + t.~Tracked(); + }, + "re-destroyed"); +} + +TEST(ConstructorTrackerTest, ConstructedTwice) { + exceptions_internal::ConstructorTracker 
ct(exceptions_internal::countdown); + alignas(Tracked) unsigned char storage[sizeof(Tracked)]; + EXPECT_NONFATAL_FAILURE( + { + new (&storage) Tracked(); + new (&storage) Tracked(); + reinterpret_cast(&storage)->~Tracked(); + }, + "re-constructed"); +} + +TEST(ThrowingValueTraitsTest, RelationalOperators) { + ThrowingValue<> a, b; + EXPECT_TRUE((std::is_convertible::value)); + EXPECT_TRUE((std::is_convertible::value)); + EXPECT_TRUE((std::is_convertible::value)); + EXPECT_TRUE((std::is_convertible::value)); + EXPECT_TRUE((std::is_convertible b), bool>::value)); + EXPECT_TRUE((std::is_convertible= b), bool>::value)); +} + +TEST(ThrowingAllocatorTraitsTest, Assignablility) { + EXPECT_TRUE(absl::is_move_assignable>::value); + EXPECT_TRUE(absl::is_copy_assignable>::value); + EXPECT_TRUE(std::is_nothrow_move_assignable>::value); + EXPECT_TRUE(std::is_nothrow_copy_assignable>::value); +} + +} // namespace + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS diff --git a/third_party/absl/absl/base/inline_variable_test.cc b/third_party/absl/absl/base/inline_variable_test.cc new file mode 100644 index 000000000..37a40e1e4 --- /dev/null +++ b/third_party/absl/absl/base/inline_variable_test.cc @@ -0,0 +1,64 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/inline_variable_testing.h" + +#include "gtest/gtest.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { +namespace { + +TEST(InlineVariableTest, Constexpr) { + static_assert(inline_variable_foo.value == 5, ""); + static_assert(other_inline_variable_foo.value == 5, ""); + static_assert(inline_variable_int == 5, ""); + static_assert(other_inline_variable_int == 5, ""); +} + +TEST(InlineVariableTest, DefaultConstructedIdentityEquality) { + EXPECT_EQ(get_foo_a().value, 5); + EXPECT_EQ(get_foo_b().value, 5); + EXPECT_EQ(&get_foo_a(), &get_foo_b()); +} + +TEST(InlineVariableTest, DefaultConstructedIdentityInequality) { + EXPECT_NE(&inline_variable_foo, &other_inline_variable_foo); +} + +TEST(InlineVariableTest, InitializedIdentityEquality) { + EXPECT_EQ(get_int_a(), 5); + EXPECT_EQ(get_int_b(), 5); + EXPECT_EQ(&get_int_a(), &get_int_b()); +} + +TEST(InlineVariableTest, InitializedIdentityInequality) { + EXPECT_NE(&inline_variable_int, &other_inline_variable_int); +} + +TEST(InlineVariableTest, FunPtrType) { + static_assert( + std::is_same::type>::value, + ""); +} + +} // namespace +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/base/inline_variable_test_a.cc b/third_party/absl/absl/base/inline_variable_test_a.cc new file mode 100644 index 000000000..f96a58d9b --- /dev/null +++ b/third_party/absl/absl/base/inline_variable_test_a.cc @@ -0,0 +1,27 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/inline_variable_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +const Foo& get_foo_a() { return inline_variable_foo; } + +const int& get_int_a() { return inline_variable_int; } + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/base/inline_variable_test_b.cc b/third_party/absl/absl/base/inline_variable_test_b.cc new file mode 100644 index 000000000..038adc30a --- /dev/null +++ b/third_party/absl/absl/base/inline_variable_test_b.cc @@ -0,0 +1,27 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/inline_variable_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +const Foo& get_foo_b() { return inline_variable_foo; } + +const int& get_int_b() { return inline_variable_int; } + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/base/internal/atomic_hook.h b/third_party/absl/absl/base/internal/atomic_hook.h new file mode 100644 index 000000000..ae21cd7fe --- /dev/null +++ b/third_party/absl/absl/base/internal/atomic_hook.h @@ -0,0 +1,200 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
+#else
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
+#endif
+
+#if defined(_MSC_VER)
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
+#else
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+class AtomicHook;
+
+// To work around AtomicHook not being constant-initializable on some
+// platforms, prefer to annotate instances with
+// `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES` instead of `ABSL_CONST_INIT`.
+#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT
+#else
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+#endif
+
+// `AtomicHook` is a helper class, templatized on a raw function pointer type,
+// for implementing Abseil customization hooks. It is a callable object that
+// dispatches to the registered hook. Objects of type `AtomicHook` must have
+// static or thread storage duration.
+//
+// A default constructed object performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
+// Hooks can be pre-registered via constant initialization, for example:
+//
+// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void (*)()>
+//     my_hook(DefaultAction);
+//
+// and then changed at runtime via a call to `Store()`.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+  using FnPtr = ReturnType (*)(Args...);
+
+  // Constructs an object that by default performs a no-op (and
+  // returns a default constructed object) when no hook has been registered.
+  constexpr AtomicHook() : AtomicHook(DummyFunction) {}
+
+  // Constructs an object that by default dispatches to/returns the
+  // pre-registered default_fn when no hook has been registered at runtime.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(default_fn), default_fn_(default_fn) {}
+#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(kUninitialized), default_fn_(default_fn) {}
+#else
+  // As of January 2020, on all known versions of MSVC this constructor runs in
+  // the global constructor sequence. If `Store()` is called by a dynamic
+  // initializer, we want to preserve the value, even if this constructor runs
+  // after the call to `Store()`. If not, `hook_` will be
+  // zero-initialized by the linker and we have no need to set it.
+  // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : /* hook_(deliberately omitted), */ default_fn_(default_fn) {
+    static_assert(kUninitialized == 0, "here we rely on zero-initialization");
+  }
+#endif
+
+  // Stores the provided function pointer as the value for this hook.
+  //
+  // This is intended to be called once. Multiple calls are legal only if the
+  // same function pointer is provided for each call. The store is implemented
+  // as a memory_order_release operation, and read accesses are implemented as
+  // memory_order_acquire.
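+  //
+  // Illustrative call sequence (hypothetical names; none of `log_hook`,
+  // `DefaultLog`, or `MyLog` is declared by this header):
+  //
+  //   ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static
+  //       absl::base_internal::AtomicHook<void (*)(const char*)>
+  //           log_hook(DefaultLog);
+  //
+  //   void MyLog(const char* msg);
+  //   log_hook.Store(&MyLog);  // install once, e.g. during initialization
+  //   log_hook("hello");       // now dispatches to MyLog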
+  void Store(FnPtr fn) {
+    bool success = DoStore(fn);
+    static_cast<void>(success);
+    assert(success);
+  }
+
+  // Invokes the registered callback. If no callback has yet been registered, a
+  // default-constructed object of the appropriate type is returned instead.
+  template <typename... CallArgs>
+  ReturnType operator()(CallArgs&&... args) const {
+    return DoLoad()(std::forward<CallArgs>(args)...);
+  }
+
+  // Returns the registered callback, or nullptr if none has been registered.
+  // Useful if client code needs to conditionalize behavior based on whether a
+  // callback was registered.
+  //
+  // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+  // operator()() will perform a no-op if no callback was registered, while
+  // Load()() will dereference a null function pointer. Prefer operator()() to
+  // Load()() unless you must conditionalize behavior on whether a hook was
+  // registered.
+  FnPtr Load() const {
+    FnPtr ptr = DoLoad();
+    return (ptr == DummyFunction) ? nullptr : ptr;
+  }
+
+ private:
+  static ReturnType DummyFunction(Args...) {
+    return ReturnType();
+  }
+
+  // Current versions of MSVC (as of September 2017) have a broken
+  // implementation of std::atomic<T*>: Its constructor attempts to do the
+  // equivalent of a reinterpret_cast in a constexpr context, which is not
+  // allowed.
+  //
+  // This causes an issue when building with LLVM under Windows. To avoid this,
+  // we use a less-efficient, intptr_t-based implementation on Windows.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Return the stored value, or DummyFunction if no value has been stored.
+  FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
+
+  // Store the given value. Returns false if a different value was already
+  // stored to this object.
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    FnPtr expected = default_fn_;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == fn);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<FnPtr> hook_;
+#else  // !ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Use a sentinel value unlikely to be the address of an actual function.
+  static constexpr intptr_t kUninitialized = 0;
+
+  static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
+                "intptr_t can't contain a function pointer");
+
+  FnPtr DoLoad() const {
+    const intptr_t value = hook_.load(std::memory_order_acquire);
+    if (value == kUninitialized) {
+      return default_fn_;
+    }
+    return reinterpret_cast<FnPtr>(value);
+  }
+
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    const auto value = reinterpret_cast<intptr_t>(fn);
+    intptr_t expected = kUninitialized;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == value);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<intptr_t> hook_;
+#endif
+
+  const FnPtr default_fn_;
+};
+
+#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
+#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
diff --git a/third_party/absl/absl/base/internal/atomic_hook_test.cc b/third_party/absl/absl/base/internal/atomic_hook_test.cc
new file mode 100644
index 000000000..e577a8fd9
--- /dev/null
+++ b/third_party/absl/absl/base/internal/atomic_hook_test.cc
@@ -0,0 +1,97 @@
+// Copyright 2018 The Abseil Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/atomic_hook.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook_test_helper.h" + +namespace { + +using ::testing::Eq; + +int value = 0; +void TestHook(int x) { value = x; } + +TEST(AtomicHookTest, NoDefaultFunction) { + ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + void (*)(int)> + hook; + value = 0; + + // Test the default DummyFunction. + EXPECT_TRUE(hook.Load() == nullptr); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 0); + + // Test a stored hook. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 1); + + // Calling Store() with the same hook should not crash. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 1); + hook(2); + EXPECT_EQ(value, 2); +} + +TEST(AtomicHookTest, WithDefaultFunction) { + // Set the default value to TestHook at compile-time. + ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< + void (*)(int)> + hook(TestHook); + value = 0; + + // Test the default value is TestHook. + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 0); + hook(1); + EXPECT_EQ(value, 1); + + // Calling Store() with the same hook should not crash. + hook.Store(TestHook); + EXPECT_TRUE(hook.Load() == TestHook); + EXPECT_EQ(value, 1); + hook(2); + EXPECT_EQ(value, 2); +} + +ABSL_CONST_INIT int override_func_calls = 0; +void OverrideFunc() { override_func_calls++; } +static struct OverrideInstaller { + OverrideInstaller() { absl::atomic_hook_internal::func.Store(OverrideFunc); } +} override_installer; + +TEST(AtomicHookTest, DynamicInitFromAnotherTU) { + // MSVC 14.2 doesn't do constexpr static init correctly; in particular it + // tends to sequence static init (i.e. defaults) of `AtomicHook` objects + // after their dynamic init (i.e. overrides), overwriting whatever value was + // written during dynamic init. This regression test validates the fix. + // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html + EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); + EXPECT_THAT(override_func_calls, Eq(0)); + absl::atomic_hook_internal::func(); + EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); + EXPECT_THAT(override_func_calls, Eq(1)); + EXPECT_THAT(absl::atomic_hook_internal::func.Load(), Eq(OverrideFunc)); +} + +} // namespace diff --git a/third_party/absl/absl/base/internal/atomic_hook_test_helper.cc b/third_party/absl/absl/base/internal/atomic_hook_test_helper.cc new file mode 100644 index 000000000..537d47cd2 --- /dev/null +++ b/third_party/absl/absl/base/internal/atomic_hook_test_helper.cc @@ -0,0 +1,32 @@ +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/atomic_hook_test_helper.h" + +#include "absl/base/attributes.h" +#include "absl/base/internal/atomic_hook.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace atomic_hook_internal { + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook + func(DefaultFunc); +ABSL_CONST_INIT int default_func_calls = 0; +void DefaultFunc() { default_func_calls++; } +void RegisterFunc(VoidF f) { func.Store(f); } + +} // namespace atomic_hook_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/base/internal/atomic_hook_test_helper.h b/third_party/absl/absl/base/internal/atomic_hook_test_helper.h new file mode 100644 index 000000000..c72015ef9 --- /dev/null +++ b/third_party/absl/absl/base/internal/atomic_hook_test_helper.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ + +#include "absl/base/internal/atomic_hook.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace atomic_hook_internal { + +using VoidF = void (*)(); +extern absl::base_internal::AtomicHook func; +extern int default_func_calls; +void DefaultFunc(); +void RegisterFunc(VoidF func); + +} // namespace atomic_hook_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ diff --git a/third_party/absl/absl/base/internal/cmake_thread_test.cc b/third_party/absl/absl/base/internal/cmake_thread_test.cc new file mode 100644 index 000000000..f70bb24eb --- /dev/null +++ b/third_party/absl/absl/base/internal/cmake_thread_test.cc @@ -0,0 +1,22 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
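+
+// A tiny standalone binary (note: plain main(), no test framework) used by
+// the CMake build to verify that executables depending on absl targets also
+// link the platform thread library; referencing
+// CurrentThreadIdentityIfPresent() below is presumably what forces that
+// dependency in.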
+
+#include <iostream>
+#include "absl/base/internal/thread_identity.h"
+
+int main() {
+  auto* tid = absl::base_internal::CurrentThreadIdentityIfPresent();
+  // Make sure the above call can't be optimized out.
+  std::cout << (void*)tid << std::endl;
+}
diff --git a/third_party/absl/absl/base/internal/cycleclock.cc b/third_party/absl/absl/base/internal/cycleclock.cc
new file mode 100644
index 000000000..902e3f5ef
--- /dev/null
+++ b/third_party/absl/absl/base/internal/cycleclock.cc
@@ -0,0 +1,77 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+// http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b. See also
+// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "absl/base/internal/cycleclock.h"
+
+#include <atomic>
+#include <chrono>  // NOLINT(build/c++11)
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int32_t CycleClock::kShift;
+constexpr double CycleClock::kFrequencyScale;
+#endif
+
+ABSL_CONST_INIT std::atomic<CycleClockSourceFunc>
+    CycleClock::cycle_clock_source_{nullptr};
+
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+  // Corresponds to the load(std::memory_order_acquire) in
+  // LoadCycleClockSource.
+  CycleClock::cycle_clock_source_.store(source, std::memory_order_release);
+}
+
+#ifdef _WIN32
+int64_t CycleClock::Now() {
+  auto fn = LoadCycleClockSource();
+  if (fn == nullptr) {
+    return base_internal::UnscaledCycleClock::Now() >> kShift;
+  }
+  return fn() >> kShift;
+}
+#endif
+
+#else
+
+int64_t CycleClock::Now() {
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
+}
+
+double CycleClock::Frequency() {
+  return 1e9;
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/internal/cycleclock.h b/third_party/absl/absl/base/internal/cycleclock.h
new file mode 100644
index 000000000..cbfdf5799
--- /dev/null
+++ b/third_party/absl/absl/base/internal/cycleclock.h
@@ -0,0 +1,144 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
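+//
+// Illustrative conversion from cycles to wall time (hypothetical caller code,
+// not part of this header): divide a cycle delta by `Frequency()`:
+//
+//   using absl::base_internal::CycleClock;
+//   const int64_t start = CycleClock::Now();
+//   DoExpensiveWork();
+//   const double seconds =
+//       (CycleClock::Now() - start) / CycleClock::Frequency();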
+// + +// ----------------------------------------------------------------------------- +// File: cycleclock.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `CycleClock`, which yields the value and frequency +// of a cycle counter that increments at a rate that is approximately constant. +// +// NOTE: +// +// The cycle counter frequency is not necessarily related to the core clock +// frequency and should not be treated as such. That is, `CycleClock` cycles are +// not necessarily "CPU cycles" and code should not rely on that behavior, even +// if experimentally observed. +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/cycleclock_config.h" +#include "absl/base/internal/unscaledcycleclock.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +using CycleClockSourceFunc = int64_t (*)(); + +// ----------------------------------------------------------------------------- +// CycleClock +// ----------------------------------------------------------------------------- +class CycleClock { + public: + // CycleClock::Now() + // + // Returns the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // CycleClock::Frequency() + // + // Returns the amount by which `CycleClock::Now()` increases per second. Note + // that this value may not necessarily match the core CPU clock frequency. + static double Frequency(); + + private: +#if ABSL_USE_UNSCALED_CYCLECLOCK + static CycleClockSourceFunc LoadCycleClockSource(); + + static constexpr int32_t kShift = kCycleClockShift; + static constexpr double kFrequencyScale = kCycleClockFrequencyScale; + + ABSL_CONST_INIT static std::atomic cycle_clock_source_; +#endif // ABSL_USE_UNSCALED_CYCLECLOC + + CycleClock() = delete; // no instances + CycleClock(const CycleClock&) = delete; + CycleClock& operator=(const CycleClock&) = delete; + + friend class CycleClockSource; +}; + +class CycleClockSource { + private: + // CycleClockSource::Register() + // + // Register a function that provides an alternate source for the unscaled CPU + // cycle count value. The source function must be async signal safe, must not + // call CycleClock::Now(), and must have a frequency that matches that of the + // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use + // the default source. + static void Register(CycleClockSourceFunc source); +}; + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() { +#if !defined(__x86_64__) + // Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. 
+ if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) { + return nullptr; + } +#endif // !defined(__x86_64__) + + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback + // is invoked. + return cycle_clock_source_.load(std::memory_order_acquire); +} + +// Accessing globals in inlined code in Window DLLs is problematic. +#ifndef _WIN32 +inline int64_t CycleClock::Now() { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; +} +#endif + +inline double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/third_party/absl/absl/base/internal/cycleclock_config.h b/third_party/absl/absl/base/internal/cycleclock_config.h new file mode 100644 index 000000000..191112b58 --- /dev/null +++ b/third_party/absl/absl/base/internal/cycleclock_config.h @@ -0,0 +1,55 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/unscaledcycleclock_config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if ABSL_USE_UNSCALED_CYCLECLOCK +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +// Not debug mode and the UnscaledCycleClock frequency is the CPU +// frequency. Scale the CycleClock to prevent overflow if someone +// tries to represent the time as cycles since the Unix epoch. +ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1); +#else +// Not debug mode and the UnscaledCycleClock isn't operating at the +// raw CPU frequency. There is no need to do any scaling, so don't +// needlessly sacrifice precision. +ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0); +#endif +#else // NDEBUG +// In debug mode use a different shift to discourage depending on a +// particular shift value. 
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2); +#endif // NDEBUG + +ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale, + 1.0 / (1 << kCycleClockShift)); +#endif // ABSL_USE_UNSCALED_CYCLECLOC + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ diff --git a/third_party/absl/absl/base/internal/direct_mmap.h b/third_party/absl/absl/base/internal/direct_mmap.h new file mode 100644 index 000000000..1beb2ee4e --- /dev/null +++ b/third_party/absl/absl/base/internal/direct_mmap.h @@ -0,0 +1,170 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for directly invoking mmap() via syscall, avoiding the case where +// mmap() has been locally overridden. + +#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ +#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_MMAP + +#include + +#ifdef __linux__ + +#include +#ifdef __BIONIC__ +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + +#ifdef __mips__ +// Include definitions of the ABI currently in use. +#if defined(__BIONIC__) || !defined(__GLIBC__) +// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the +// definitions we need. +#include +#else +#include +#endif // __BIONIC__ || !__GLIBC__ +#endif // __mips__ + +// SYS_mmap and SYS_munmap are not defined in Android. +#ifdef __BIONIC__ +extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); +#if defined(__NR_mmap) && !defined(SYS_mmap) +#define SYS_mmap __NR_mmap +#endif +#ifndef SYS_munmap +#define SYS_munmap __NR_munmap +#endif +#endif // __BIONIC__ + +#if defined(__NR_mmap2) && !defined(SYS_mmap2) +#define SYS_mmap2 __NR_mmap2 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Platform specific logic extracted from +// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h +inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, + off_t offset) noexcept { +#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + defined(__m68k__) || defined(__sh__) || \ + (defined(__hppa__) && !defined(__LP64__)) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ + (defined(__PPC__) && !defined(__PPC64__)) || \ + (defined(__riscv) && __riscv_xlen == 32) || \ + (defined(__s390__) && !defined(__s390x__)) || \ + (defined(__sparc__) && !defined(__arch64__)) + // On these architectures, implement mmap with mmap2. + static int pagesize = 0; + if (pagesize == 0) { +#if defined(__wasm__) || defined(__asmjs__) + pagesize = getpagesize(); +#else + pagesize = sysconf(_SC_PAGESIZE); +#endif + } + if (offset < 0 || offset % pagesize != 0) { + errno = EINVAL; + return MAP_FAILED; + } +#ifdef __BIONIC__ + // SYS_mmap2 has problems on Android API level <= 16. + // Workaround by invoking __mmap2() instead. 
+ return __mmap2(start, length, prot, flags, fd, + static_cast<size_t>(offset / pagesize)); +#else + return reinterpret_cast<void*>( + syscall(SYS_mmap2, start, length, prot, flags, fd, + static_cast<size_t>(offset / pagesize))); // NOLINT +#endif +#elif defined(__s390x__) + // On s390x, mmap() arguments are passed in memory. + unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT + static_cast<unsigned long>(length), // NOLINT + static_cast<unsigned long>(prot), // NOLINT + static_cast<unsigned long>(flags), // NOLINT + static_cast<unsigned long>(fd), // NOLINT + static_cast<unsigned long>(offset)}; // NOLINT + return reinterpret_cast<void*>(syscall(SYS_mmap, buf)); +#elif defined(__x86_64__) +// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. +// We need to explicitly cast to an unsigned 64 bit type to avoid implicit +// sign extension. We can't cast pointers directly because those are +// 32 bits, and gcc will dump ugly warnings about casting from a pointer +// to an integer of a different size. We also need to make sure __off64_t +// isn't truncated to 32-bits under x32. +#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x)) + return reinterpret_cast<void*>( + syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), + MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), + MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset))); +#undef MMAP_SYSCALL_ARG +#else // Remaining 64-bit architectures. + static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit"); + return reinterpret_cast<void*>( + syscall(SYS_mmap, start, length, prot, flags, fd, offset)); +#endif +} + +inline int DirectMunmap(void* start, size_t length) { + return static_cast<int>(syscall(SYS_munmap, start, length)); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#else // !__linux__ + +// For non-linux platforms where we have mmap, just dispatch directly to the +// actual mmap()/munmap() methods. + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, + off_t offset) { + return mmap(start, length, prot, flags, fd, offset); +} + +inline int DirectMunmap(void* start, size_t length) { + return munmap(start, length); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // __linux__ + +#endif // ABSL_HAVE_MMAP + +#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ diff --git a/third_party/absl/absl/base/internal/dynamic_annotations.h b/third_party/absl/absl/base/internal/dynamic_annotations.h new file mode 100644 index 000000000..b23c5ec1c --- /dev/null +++ b/third_party/absl/absl/base/internal/dynamic_annotations.h @@ -0,0 +1,398 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines dynamic annotations for use with dynamic analysis tools +// such as valgrind, PIN, etc. +// +// Dynamic annotation is a source code annotation that affects the generated +// code (that is, the annotation is not a comment).
Each such annotation is +// attached to a particular instruction and/or to a particular object (address) +// in the program. +// +// The annotations that should be used by users are macros in all upper-case +// (e.g., ANNOTATE_THREAD_NAME). +// +// Actual implementation of these macros may differ depending on the dynamic +// analysis tool being used. +// +// This file supports the following configurations: +// - Dynamic Annotations enabled (with static thread-safety warnings disabled). +// In this case, macros expand to functions implemented by Thread Sanitizer +// when building with TSan. When not provided an external implementation, +// dynamic_annotations.cc provides no-op implementations. +// +// - Static Clang thread-safety warnings enabled. +// When building with a Clang compiler that supports thread-safety warnings, +// a subset of annotations can be statically-checked at compile-time. We +// expand these macros to static-inline functions that can be analyzed for +// thread-safety, but afterwards elided when building the final binary. +// +// - All annotations are disabled. +// If neither Dynamic Annotations nor Clang thread-safety warnings are +// enabled, then all annotation-macros expand to empty. + +#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ +#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ + +#include <stddef.h> + +#include "absl/base/config.h" + +// ------------------------------------------------------------------------- +// Decide which features are enabled + +#ifndef DYNAMIC_ANNOTATIONS_ENABLED +#define DYNAMIC_ANNOTATIONS_ENABLED 0 +#endif + +#if defined(__clang__) && !defined(SWIG) +#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 + +#else + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 + +// Clang provides limited support for static thread-safety analysis through a +// feature called Annotalysis. We configure macro-definitions according to +// whether Annotalysis support is available. When running in opt-mode, GCC +// will issue a warning if these attributes are compiled. Only include them +// when compiling using Clang. + +// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ + defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) +// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#endif + +// Memory annotations are also made available to LLVM's Memory Sanitizer +#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__) +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 +#endif + +#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0 +#endif + +#ifdef __cplusplus +#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline +#endif + +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. +#define ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. 
+#define ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var##_annotator { \ + public: \ + static_var##_annotator() { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 + +#define ANNOTATE_RWLOCK_CREATE(lock) // empty +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty +#define ANNOTATE_RWLOCK_DESTROY(lock) // empty +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty +#define ANNOTATE_BENIGN_RACE(address, description) // empty +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty +#define ANNOTATE_THREAD_NAME(name) // empty +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty + +#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define memory annotations. + +#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1 + +#include <sanitizer/msan_interface.h> + +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison(address, size) + +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + __msan_allocated_memory(address, size) + +#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 + +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#else +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty +#endif + +#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END attributes. + +#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ + __attribute((exclusive_lock_function("*"))) +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ + __attribute((unlock_function("*"))) + +#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty + +#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END annotations.
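+// +// For example, a deliberately unsynchronized read of a hypothetical variable +// `v` can be exempted from race reports with: +// +//   ANNOTATE_IGNORE_READS_BEGIN(); +//   int copy = v; +//   ANNOTATE_IGNORE_READS_END(); +// +// (Illustrative only; ANNOTATE_UNPROTECTED_READ below is the preferred form +// for a single racy read.)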
+ +#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 + +// Request the analysis tool to ignore all reads in the current thread until +// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ANNOTATE_UNPROTECTED_READ. +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) + +// Stop ignoring reads. +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() + +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() + +#else + +#define ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +#else + +#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ANNOTATE_IGNORE_READS_END(); +// one can use +// ... = ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +#endif + +#else + +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. 
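+// For example, a hypothetical vector-like container with storage [beg, end) +// that shrinks from old_mid to new_mid would call +// ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid), after which ASan +// treats [new_mid, end) as unaddressable.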
+#include <sanitizer/common_interface_defs.h> + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct { \ + char x[8] __attribute__((aligned(8))); \ + } name + +#else + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. + +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ diff --git a/third_party/absl/absl/base/internal/endian.h b/third_party/absl/absl/base/internal/endian.h new file mode 100644 index 000000000..943f3d97e --- /dev/null +++ b/third_party/absl/absl/base/internal/endian.h @@ -0,0 +1,283 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// + +#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ +#define ABSL_BASE_INTERNAL_ENDIAN_H_ + +#include <stdint.h> +#include <stdlib.h> + +#include "absl/base/casts.h" +#include "absl/base/config.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/nullability.h" +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +inline uint64_t gbswap_64(uint64_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) + return __builtin_bswap64(host_int); +#elif defined(_MSC_VER) + return _byteswap_uint64(host_int); +#else + return (((host_int & uint64_t{0xFF}) << 56) | + ((host_int & uint64_t{0xFF00}) << 40) | + ((host_int & uint64_t{0xFF0000}) << 24) | + ((host_int & uint64_t{0xFF000000}) << 8) | + ((host_int & uint64_t{0xFF00000000}) >> 8) | + ((host_int & uint64_t{0xFF0000000000}) >> 24) | + ((host_int & uint64_t{0xFF000000000000}) >> 40) | + ((host_int & uint64_t{0xFF00000000000000}) >> 56)); +#endif +} + +inline uint32_t gbswap_32(uint32_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) + return __builtin_bswap32(host_int); +#elif defined(_MSC_VER) + return _byteswap_ulong(host_int); +#else + return (((host_int & uint32_t{0xFF}) << 24) | + ((host_int & uint32_t{0xFF00}) << 8) | + ((host_int & uint32_t{0xFF0000}) >> 8) | + ((host_int & uint32_t{0xFF000000}) >> 24)); +#endif +} + +inline uint16_t gbswap_16(uint16_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) + return __builtin_bswap16(host_int); +#elif defined(_MSC_VER) + return _byteswap_ushort(host_int); +#else + return (((host_int & uint16_t{0xFF}) << 8) | + ((host_int & uint16_t{0xFF00}) >> 8)); +#endif +} + +#ifdef ABSL_IS_LITTLE_ENDIAN + +// Portable definitions for htonl (host-to-network) and friends on little-endian +// architectures. +inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } +inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } +inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } + +#elif defined ABSL_IS_BIG_ENDIAN + +// Portable definitions for htonl (host-to-network) etc on big-endian +// architectures. These definitions are simpler since the host byte order is the +// same as network byte order. +inline uint16_t ghtons(uint16_t x) { return x; } +inline uint32_t ghtonl(uint32_t x) { return x; } +inline uint64_t ghtonll(uint64_t x) { return x; } + +#else +#error \ + "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ + "ABSL_IS_LITTLE_ENDIAN must be defined" +#endif // byte order + +inline uint16_t gntohs(uint16_t x) { return ghtons(x); } +inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } +inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } + +// Utilities to convert numbers between the current host's native byte +// order and little-endian byte order +// +// Load/Store methods are alignment safe +namespace little_endian { +// Conversion functions.
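+// +// For example, FromHost16(0x0123) returns 0x0123 unchanged on a little-endian +// host and the byte-swapped 0x2301 on a big-endian host; the ToHost16 +// conversion is its inverse.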
+#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +inline uint8_t FromHost(uint8_t x) { return x; } +inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } +inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } +inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } +inline uint8_t ToHost(uint8_t x) { return x; } +inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } +inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } +inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } + +inline int8_t FromHost(int8_t x) { return x; } +inline int16_t FromHost(int16_t x) { + return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x))); +} +inline int32_t FromHost(int32_t x) { + return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x))); +} +inline int64_t FromHost(int64_t x) { + return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x))); +} +inline int8_t ToHost(int8_t x) { return x; } +inline int16_t ToHost(int16_t x) { + return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x))); +} +inline int32_t ToHost(int32_t x) { + return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x))); +} +inline int64_t ToHost(int64_t x) { + return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x))); +} + +// Functions to do unaligned loads and stores in little-endian order.
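+// +// For example, little_endian::Store32(p, 0x01020304) writes the bytes +// {0x04, 0x03, 0x02, 0x01} at p on any host, and little_endian::Load32(p) +// reads them back as 0x01020304.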
+inline uint16_t Load16(absl::Nonnull<const void*> p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(absl::Nonnull<void*> p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(absl::Nonnull<const void*> p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(absl::Nonnull<void*> p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(absl::Nonnull<const void*> p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(absl::Nonnull<void*> p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace little_endian + +// Utilities to convert numbers between the current host's native byte +// order and big-endian byte order (same as network byte order) +// +// Load/Store methods are alignment safe +namespace big_endian { +#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +inline uint8_t FromHost(uint8_t x) { return x; } +inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } +inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } +inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } +inline uint8_t ToHost(uint8_t x) { return x; } +inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } +inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } +inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } + +inline int8_t FromHost(int8_t x) { return x; } +inline int16_t FromHost(int16_t x) { + return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x))); +} +inline int32_t FromHost(int32_t x) { + return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x))); +} +inline int64_t FromHost(int64_t x) { + return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x))); +} +inline int8_t ToHost(int8_t x) { return x; } +inline int16_t ToHost(int16_t x) { + return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x))); +} +inline int32_t ToHost(int32_t x) { + return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x))); +} +inline int64_t ToHost(int64_t x) { + return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x))); +} + +// Functions to do unaligned loads and stores in big-endian order.
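+// +// For example, big_endian::Store32(p, 0x01020304) writes the bytes +// {0x01, 0x02, 0x03, 0x04} at p (network byte order) regardless of the +// host's endianness.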
+inline uint16_t Load16(absl::Nonnull<const void*> p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(absl::Nonnull<void*> p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(absl::Nonnull<const void*> p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(absl::Nonnull<void*> p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(absl::Nonnull<const void*> p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(absl::Nonnull<void*> p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace big_endian + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/third_party/absl/absl/base/internal/endian_test.cc b/third_party/absl/absl/base/internal/endian_test.cc new file mode 100644 index 000000000..a1691b1f8 --- /dev/null +++ b/third_party/absl/absl/base/internal/endian_test.cc @@ -0,0 +1,263 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/endian.h" + +#include <algorithm> +#include <cstdint> +#include <limits> +#include <random> +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace { + +const uint64_t kInitialNumber{0x0123456789abcdef}; +const uint64_t k64Value{kInitialNumber}; +const uint32_t k32Value{0x01234567}; +const uint16_t k16Value{0x0123}; +const int kNumValuesToTest = 1000000; +const int kRandomSeed = 12345; + +#if defined(ABSL_IS_BIG_ENDIAN) +const uint64_t kInitialInNetworkOrder{kInitialNumber}; +const uint64_t k64ValueLE{0xefcdab8967452301}; +const uint32_t k32ValueLE{0x67452301}; +const uint16_t k16ValueLE{0x2301}; + +const uint64_t k64ValueBE{kInitialNumber}; +const uint32_t k32ValueBE{k32Value}; +const uint16_t k16ValueBE{k16Value}; +#elif defined(ABSL_IS_LITTLE_ENDIAN) +const uint64_t kInitialInNetworkOrder{0xefcdab8967452301}; +const uint64_t k64ValueLE{kInitialNumber}; +const uint32_t k32ValueLE{k32Value}; +const uint16_t k16ValueLE{k16Value}; + +const uint64_t k64ValueBE{0xefcdab8967452301}; +const uint32_t k32ValueBE{0x67452301}; +const uint16_t k16ValueBE{0x2301}; +#endif + +std::vector<uint16_t> GenerateAllUint16Values() { + std::vector<uint16_t> result; + result.reserve(size_t{1} << (sizeof(uint16_t) * 8)); + for (uint32_t i = std::numeric_limits<uint16_t>::min(); + i <= std::numeric_limits<uint16_t>::max(); ++i) { + result.push_back(static_cast<uint16_t>(i)); + } + return result; +} + +template <typename T> +std::vector<T> GenerateRandomIntegers(size_t num_values_to_test) { + std::vector<T> result; + result.reserve(num_values_to_test); + std::mt19937_64 rng(kRandomSeed); + for (size_t i = 0; i < num_values_to_test; ++i) { + result.push_back(rng()); + } + return result; +} + +void ManualByteSwap(char* bytes, int length) { + if (length == 1) + return; + + EXPECT_EQ(0, length % 2); + for (int i = 0; i < length / 2; ++i) { + int j = (length - 1) - i; + using std::swap; + swap(bytes[i], bytes[j]); + } +} + +template <typename T> +inline T
UnalignedLoad(const char* p) { + static_assert( + sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8, + "Unexpected type size"); + + switch (sizeof(T)) { + case 1: return *reinterpret_cast<const T*>(p); + case 2: + return ABSL_INTERNAL_UNALIGNED_LOAD16(p); + case 4: + return ABSL_INTERNAL_UNALIGNED_LOAD32(p); + case 8: + return ABSL_INTERNAL_UNALIGNED_LOAD64(p); + default: + // Suppresses invalid "not all control paths return a value" on MSVC + return {}; + } +} + +template <typename T, typename ByteSwapper> +static void GBSwapHelper(const std::vector<T>& host_values_to_test, + const ByteSwapper& byte_swapper) { + // Test byte_swapper against a manual byte swap. + for (typename std::vector<T>::const_iterator it = host_values_to_test.begin(); + it != host_values_to_test.end(); ++it) { + T host_value = *it; + + char actual_value[sizeof(host_value)]; + memcpy(actual_value, &host_value, sizeof(host_value)); + byte_swapper(actual_value); + + char expected_value[sizeof(host_value)]; + memcpy(expected_value, &host_value, sizeof(host_value)); + ManualByteSwap(expected_value, sizeof(host_value)); + + ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value))) + << "Swap output for 0x" << std::hex << host_value << " does not match. " + << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; " + << "actual: 0x" << UnalignedLoad<T>(actual_value); + } +} + +void Swap16(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE16( + bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes))); +} + +void Swap32(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE32( + bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes))); +} + +void Swap64(char* bytes) { + ABSL_INTERNAL_UNALIGNED_STORE64( + bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes))); +} + +TEST(EndianessTest, Uint16) { + GBSwapHelper(GenerateAllUint16Values(), &Swap16); +} + +TEST(EndianessTest, Uint32) { + GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32); +} + +TEST(EndianessTest, Uint64) { + GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64); +} + +TEST(EndianessTest, ghtonll_gntohll) { + // Test that absl::ghtonl compiles correctly + uint32_t test = 0x01234567; + EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test); + + uint64_t comp = absl::ghtonll(kInitialNumber); + EXPECT_EQ(comp, kInitialInNetworkOrder); + comp = absl::gntohll(kInitialInNetworkOrder); + EXPECT_EQ(comp, kInitialNumber); + + // Test that htonll and ntohll are each others' inverse functions on a + // somewhat assorted batch of numbers. 37 is chosen to not be anything + // particularly nice base 2. + uint64_t value = 1; + for (int i = 0; i < 100; ++i) { + comp = absl::ghtonll(absl::gntohll(value)); + EXPECT_EQ(value, comp); + comp = absl::gntohll(absl::ghtonll(value)); + EXPECT_EQ(value, comp); + value *= 37; + } +} + +TEST(EndianessTest, little_endian) { + // Check little_endian uint16_t. + uint64_t comp = little_endian::FromHost16(k16Value); + EXPECT_EQ(comp, k16ValueLE); + comp = little_endian::ToHost16(k16ValueLE); + EXPECT_EQ(comp, k16Value); + + // Check little_endian uint32_t. + comp = little_endian::FromHost32(k32Value); + EXPECT_EQ(comp, k32ValueLE); + comp = little_endian::ToHost32(k32ValueLE); + EXPECT_EQ(comp, k32Value); + + // Check little_endian uint64_t. + comp = little_endian::FromHost64(k64Value); + EXPECT_EQ(comp, k64ValueLE); + comp = little_endian::ToHost64(k64ValueLE); + EXPECT_EQ(comp, k64Value); + + // Check little-endian Load and store functions.
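+ // (The aligned locals below cover the typical case; the big-endian test + // below additionally exercises the unaligned path via buffer + 1.)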
+ uint16_t u16Buf; + uint32_t u32Buf; + uint64_t u64Buf; + + little_endian::Store16(&u16Buf, k16Value); + EXPECT_EQ(u16Buf, k16ValueLE); + comp = little_endian::Load16(&u16Buf); + EXPECT_EQ(comp, k16Value); + + little_endian::Store32(&u32Buf, k32Value); + EXPECT_EQ(u32Buf, k32ValueLE); + comp = little_endian::Load32(&u32Buf); + EXPECT_EQ(comp, k32Value); + + little_endian::Store64(&u64Buf, k64Value); + EXPECT_EQ(u64Buf, k64ValueLE); + comp = little_endian::Load64(&u64Buf); + EXPECT_EQ(comp, k64Value); +} + +TEST(EndianessTest, big_endian) { + // Check big-endian Load and store functions. + uint16_t u16Buf; + uint32_t u32Buf; + uint64_t u64Buf; + + unsigned char buffer[10]; + big_endian::Store16(&u16Buf, k16Value); + EXPECT_EQ(u16Buf, k16ValueBE); + uint64_t comp = big_endian::Load16(&u16Buf); + EXPECT_EQ(comp, k16Value); + + big_endian::Store32(&u32Buf, k32Value); + EXPECT_EQ(u32Buf, k32ValueBE); + comp = big_endian::Load32(&u32Buf); + EXPECT_EQ(comp, k32Value); + + big_endian::Store64(&u64Buf, k64Value); + EXPECT_EQ(u64Buf, k64ValueBE); + comp = big_endian::Load64(&u64Buf); + EXPECT_EQ(comp, k64Value); + + big_endian::Store16(buffer + 1, k16Value); + EXPECT_EQ(u16Buf, k16ValueBE); + comp = big_endian::Load16(buffer + 1); + EXPECT_EQ(comp, k16Value); + + big_endian::Store32(buffer + 1, k32Value); + EXPECT_EQ(u32Buf, k32ValueBE); + comp = big_endian::Load32(buffer + 1); + EXPECT_EQ(comp, k32Value); + + big_endian::Store64(buffer + 1, k64Value); + EXPECT_EQ(u64Buf, k64ValueBE); + comp = big_endian::Load64(buffer + 1); + EXPECT_EQ(comp, k64Value); +} + +} // namespace +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/base/internal/errno_saver.h b/third_party/absl/absl/base/internal/errno_saver.h new file mode 100644 index 000000000..251de510f --- /dev/null +++ b/third_party/absl/absl/base/internal/errno_saver.h @@ -0,0 +1,43 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ +#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ + +#include <cerrno> + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// `ErrnoSaver` captures the value of `errno` upon construction and restores it +// upon destruction. It is used in low-level code and must be super fast. Do +// not add instrumentation, even in debug modes. +class ErrnoSaver { + public: + ErrnoSaver() : saved_errno_(errno) {} + ~ErrnoSaver() { errno = saved_errno_; } + int operator()() const { return saved_errno_; } + + private: + const int saved_errno_; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ diff --git a/third_party/absl/absl/base/internal/errno_saver_test.cc b/third_party/absl/absl/base/internal/errno_saver_test.cc new file mode 100644 index 000000000..e9b742c58 --- /dev/null +++ b/third_party/absl/absl/base/internal/errno_saver_test.cc @@ -0,0 +1,45 @@ +// Copyright 2018 The Abseil Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/errno_saver.h" + +#include <cerrno> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/strerror.h" + +namespace { +using ::testing::Eq; + +struct ErrnoPrinter { + int no; +}; +std::ostream &operator<<(std::ostream &os, ErrnoPrinter ep) { + return os << absl::base_internal::StrError(ep.no) << " [" << ep.no << "]"; +} +bool operator==(ErrnoPrinter one, ErrnoPrinter two) { return one.no == two.no; } + +TEST(ErrnoSaverTest, Works) { + errno = EDOM; + { + absl::base_internal::ErrnoSaver errno_saver; + EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM})); + errno = ERANGE; + EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{ERANGE})); + EXPECT_THAT(ErrnoPrinter{errno_saver()}, Eq(ErrnoPrinter{EDOM})); + } + EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM})); +} +} // namespace diff --git a/third_party/absl/absl/base/internal/exception_safety_testing.cc b/third_party/absl/absl/base/internal/exception_safety_testing.cc new file mode 100644 index 000000000..6ccac4186 --- /dev/null +++ b/third_party/absl/absl/base/internal/exception_safety_testing.cc @@ -0,0 +1,79 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#include "absl/base/internal/exception_safety_testing.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" + +namespace testing { + +exceptions_internal::NoThrowTag nothrow_ctor; + +exceptions_internal::StrongGuaranteeTagType strong_guarantee; + +exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() { + return {}; +} + +namespace exceptions_internal { + +int countdown = -1; + +ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr; + +void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) { + if (countdown-- == 0) { + if (throw_bad_alloc) throw TestBadAllocException(msg); + throw TestException(msg); + } +} + +testing::AssertionResult FailureMessage(const TestException& e, + int countdown) noexcept { + return testing::AssertionFailure() << "Exception thrown from " << e.what(); +} + +std::string GetSpecString(TypeSpec spec) { + std::string out; + absl::string_view sep; + const auto append = [&](absl::string_view s) { + absl::StrAppend(&out, sep, s); + sep = " | "; + }; + if (static_cast<bool>(TypeSpec::kNoThrowCopy & spec)) { + append("kNoThrowCopy"); + } + if (static_cast<bool>(TypeSpec::kNoThrowMove & spec)) { + append("kNoThrowMove"); + } + if (static_cast<bool>(TypeSpec::kNoThrowNew & spec)) { + append("kNoThrowNew"); + } + return out; +} + +std::string GetSpecString(AllocSpec spec) { + return static_cast<bool>(AllocSpec::kNoThrowAllocate & spec) + ? "kNoThrowAllocate" + : ""; +} + +} // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS diff --git a/third_party/absl/absl/base/internal/exception_safety_testing.h b/third_party/absl/absl/base/internal/exception_safety_testing.h new file mode 100644 index 000000000..c10615440 --- /dev/null +++ b/third_party/absl/absl/base/internal/exception_safety_testing.h @@ -0,0 +1,1109 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +// Utilities for testing exception-safety + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include <cstddef> +#include <cstdint> +#include <functional> +#include <initializer_list> +#include <iosfwd> +#include <string> +#include <tuple> +#include <unordered_map> + +#include "gtest/gtest.h" +#include "absl/base/internal/pretty_function.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/strings/substitute.h" +#include "absl/utility/utility.h" + +namespace testing { + +enum class TypeSpec; +enum class AllocSpec; + +constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t<TypeSpec>; + return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b)); +} + +constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t<TypeSpec>; + return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b)); +} + +constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t<AllocSpec>; + return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b)); +} + +constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t<AllocSpec>; + return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b)); +} + +namespace exceptions_internal { + +std::string GetSpecString(TypeSpec); +std::string GetSpecString(AllocSpec); + +struct NoThrowTag {}; +struct StrongGuaranteeTagType {}; + +// A simple exception class. We throw this so that test code can catch +// exceptions specifically thrown by ThrowingValue. +class TestException { + public: + explicit TestException(absl::string_view msg) : msg_(msg) {} + virtual ~TestException() {} + virtual const char* what() const noexcept { return msg_.c_str(); } + + private: + std::string msg_; +}; + +// TestBadAllocException exists because allocation functions must throw an +// exception which can be caught by a handler of std::bad_alloc. We use a child +// class of std::bad_alloc so we can customise the error message, and also +// derive from TestException so we don't accidentally end up catching an actual +// bad_alloc exception in TestExceptionSafety. +class TestBadAllocException : public std::bad_alloc, public TestException { + public: + explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {} + using TestException::what; +}; + +extern int countdown; + +// Allows the countdown variable to be set manually (defaulting to the initial +// value of 0) +inline void SetCountdown(int i = 0) { countdown = i; } +// Sets the countdown to the terminal value -1 +inline void UnsetCountdown() { SetCountdown(-1); } + +void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); + +testing::AssertionResult FailureMessage(const TestException& e, + int countdown) noexcept; + +struct TrackedAddress { + bool is_alive; + std::string description; +}; + +// Inspects the constructions and destructions of anything inheriting from +// TrackedObject. This allows us to safely "leak" TrackedObjects, as +// ConstructorTracker will destroy everything left over in its destructor.
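+// +// A typical (illustrative) use scopes a tracker around a test body: +// +//   exceptions_internal::ConstructorTracker tracker(current_countdown); +//   // ... construct and destroy TrackedObject instances ... +// +// where `current_countdown` stands for the countdown under test; anything +// still alive when `tracker` goes out of scope is reported as a test failure.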
+class ConstructorTracker { + public: + explicit ConstructorTracker(int count) : countdown_(count) { + assert(current_tracker_instance_ == nullptr); + current_tracker_instance_ = this; + } + + ~ConstructorTracker() { + assert(current_tracker_instance_ == this); + current_tracker_instance_ = nullptr; + + for (auto& it : address_map_) { + void* address = it.first; + TrackedAddress& tracked_address = it.second; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + countdown_, "Object was not destroyed."); + } + } + } + + static void ObjectConstructed(void* address, std::string description) { + if (!CurrentlyTracking()) return; + + TrackedAddress& tracked_address = + current_tracker_instance_->address_map_[address]; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage( + address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-constructed. Current object was constructed by " + + description); + } + tracked_address = {true, std::move(description)}; + } + + static void ObjectDestructed(void* address) { + if (!CurrentlyTracking()) return; + + auto it = current_tracker_instance_->address_map_.find(address); + // Not tracked. Ignore. + if (it == current_tracker_instance_->address_map_.end()) return; + + TrackedAddress& tracked_address = it->second; + if (!tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-destroyed."); + } + tracked_address.is_alive = false; + } + + private: + static bool CurrentlyTracking() { + return current_tracker_instance_ != nullptr; + } + + static std::string ErrorMessage(void* address, + const std::string& address_description, + int countdown, + const std::string& error_description) { + return absl::Substitute( + "With countdown at $0:\n" + " $1\n" + " Object originally constructed by $2\n" + " Object address: $3\n", + countdown, error_description, address_description, address); + } + + std::unordered_map<void*, TrackedAddress> address_map_; + int countdown_; + + static ConstructorTracker* current_tracker_instance_; +}; + +class TrackedObject { + public: + TrackedObject(const TrackedObject&) = delete; + TrackedObject(TrackedObject&&) = delete; + + protected: + explicit TrackedObject(std::string description) { + ConstructorTracker::ObjectConstructed(this, std::move(description)); + } + + ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); } +}; +} // namespace exceptions_internal + +extern exceptions_internal::NoThrowTag nothrow_ctor; + +extern exceptions_internal::StrongGuaranteeTagType strong_guarantee; + +// A test class which is convertible to bool. The conversion can be +// instrumented to throw at a controlled time. +class ThrowingBool { + public: + ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit) + operator bool() const { // NOLINT + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return b_; + } + + private: + bool b_; +}; + +/* + * Configuration enum for the ThrowingValue type that defines behavior for the + * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer + * constructor from throwing.
+ * + * kEverythingThrows: Every operation can throw an exception + * kNoThrowCopy: Copy construction and copy assignment will not throw + * kNoThrowMove: Move construction and move assignment will not throw + * kNoThrowNew: Overloaded operators new and new[] will not throw + */ +enum class TypeSpec { + kEverythingThrows = 0, + kNoThrowCopy = 1, + kNoThrowMove = 1 << 1, + kNoThrowNew = 1 << 2, +}; + +/* + * A testing class instrumented to throw an exception at a controlled time. + * + * ThrowingValue implements a slightly relaxed version of the Regular concept -- + * that is, it's a value type with the expected semantics. It also implements + * arithmetic operations. It doesn't implement member and pointer operators + * like operator-> or operator[]. + * + * ThrowingValue can be instrumented to have certain operations be noexcept by + * using compile-time bitfield template arguments. That is, to make a + * ThrowingValue which has noexcept move construction/assignment and noexcept + * copy construction/assignment, use the following: + * ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowCopy> my_thrwr{val}; + */ +template <TypeSpec Spec = TypeSpec::kEverythingThrows> +class ThrowingValue : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(TypeSpec spec) { + return static_cast<bool>(Spec & spec); + } + + static constexpr int kDefaultValue = 0; + static constexpr int kBadValue = 938550620; + + public: + ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = kDefaultValue; + } + + ThrowingValue(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + ThrowingValue(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = i; + } + + ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept + : TrackedObject(GetInstanceString(i)), dummy_(i) {} + + // absl expects nothrow destructors + ~ThrowingValue() noexcept = default; + + ThrowingValue& operator=(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + ThrowingValue& operator=(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + // Arithmetic Operators + ThrowingValue operator+(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); + } + + ThrowingValue operator+() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_, nothrow_ctor); + } + + ThrowingValue operator-(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); + } + + ThrowingValue
operator-() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(-dummy_, nothrow_ctor); + } + + ThrowingValue& operator++() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + ++dummy_; + return *this; + } + + ThrowingValue operator++(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + ++dummy_; + return out; + } + + ThrowingValue& operator--() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + --dummy_; + return *this; + } + + ThrowingValue operator--(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + --dummy_; + return out; + } + + ThrowingValue operator*(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); + } + + ThrowingValue operator/(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); + } + + ThrowingValue operator%(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); + } + + ThrowingValue operator<<(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ << shift, nothrow_ctor); + } + + ThrowingValue operator>>(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ >> shift, nothrow_ctor); + } + + // Comparison Operators + // NOTE: We use `ThrowingBool` instead of `bool` because most STL + // types/containers require T to be convertible to bool. + friend ThrowingBool operator==(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ == b.dummy_; + } + friend ThrowingBool operator!=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ != b.dummy_; + } + friend ThrowingBool operator<(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ < b.dummy_; + } + friend ThrowingBool operator<=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ <= b.dummy_; + } + friend ThrowingBool operator>(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ > b.dummy_; + } + friend ThrowingBool operator>=(const ThrowingValue& a, + const ThrowingValue& b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ >= b.dummy_; + } + + // Logical Operators + ThrowingBool operator!() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return !dummy_; + } + + ThrowingBool operator&&(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ && other.dummy_; + } + + ThrowingBool operator||(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ || other.dummy_; + } + + // Bitwise Logical Operators + ThrowingValue operator~() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(~dummy_, nothrow_ctor); + } + + ThrowingValue operator&(const ThrowingValue& other) const { +
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); + } + + ThrowingValue operator|(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); + } + + ThrowingValue operator^(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); + } + + // Compound Assignment operators + ThrowingValue& operator+=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ += other.dummy_; + return *this; + } + + ThrowingValue& operator-=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ -= other.dummy_; + return *this; + } + + ThrowingValue& operator*=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ *= other.dummy_; + return *this; + } + + ThrowingValue& operator/=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ /= other.dummy_; + return *this; + } + + ThrowingValue& operator%=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ %= other.dummy_; + return *this; + } + + ThrowingValue& operator&=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ &= other.dummy_; + return *this; + } + + ThrowingValue& operator|=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ |= other.dummy_; + return *this; + } + + ThrowingValue& operator^=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ ^= other.dummy_; + return *this; + } + + ThrowingValue& operator<<=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ <<= shift; + return *this; + } + + ThrowingValue& operator>>=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ >>= shift; + return *this; + } + + // Pointer operators + void operator&() const = delete; // NOLINT(runtime/operator) + + // Stream operators + friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return os << GetInstanceString(tv.dummy_); + } + + friend std::istream& operator>>(std::istream& is, const ThrowingValue&) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return is; + } + + // Memory management operators + static void* operator new(size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s); + } + + static void* operator new[](size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s); + } + + template <typename... Args> + static void* operator new(size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s, std::forward<Args>(args)...); + } + + template <typename... Args> + static void* operator new[](size_t s, Args&&...
args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s, std::forward(args)...); + } + + // Abseil doesn't support throwing overloaded operator delete. These are + // provided so a throwing operator-new can clean up after itself. + void operator delete(void* p) noexcept { ::operator delete(p); } + + template + void operator delete(void* p, Args&&... args) noexcept { + ::operator delete(p, std::forward(args)...); + } + + void operator delete[](void* p) noexcept { return ::operator delete[](p); } + + template + void operator delete[](void* p, Args&&... args) noexcept { + return ::operator delete[](p, std::forward(args)...); + } + + // Non-standard access to the actual contained value. No need for this to + // throw. + int& Get() noexcept { return dummy_; } + const int& Get() const noexcept { return dummy_; } + + private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingValue<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + int dummy_; +}; +// While not having to do with exceptions, explicitly delete comma operator, to +// make sure we don't use it on user-supplied types. +template +void operator,(const ThrowingValue&, T&&) = delete; +template +void operator,(T&&, const ThrowingValue&) = delete; + +/* + * Configuration enum for the ThrowingAllocator type that defines behavior for + * the lifetime of the instance. + * + * kEverythingThrows: Calls to the member functions may throw + * kNoThrowAllocate: Calls to the member functions will not throw + */ +enum class AllocSpec { + kEverythingThrows = 0, + kNoThrowAllocate = 1, +}; + +/* + * An allocator type which is instrumented to throw at a controlled time, or not + * to throw, using AllocSpec. The supported settings are the default of every + * function which is allowed to throw in a conforming allocator possibly + * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS + * configuration macro. + */ +template +class ThrowingAllocator : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(AllocSpec spec) { + return static_cast(Spec & spec); + } + + public: + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using void_pointer = void*; + using const_void_pointer = const void*; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using is_nothrow = + std::integral_constant; + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::false_type; + + ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = std::make_shared(next_id_++); + } + + template + ThrowingAllocator(const ThrowingAllocator& other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of + // allocator shall not exit via an exception, thus they are marked noexcept. 
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + template + ThrowingAllocator(ThrowingAllocator&& other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ThrowingAllocator(ThrowingAllocator&& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ~ThrowingAllocator() noexcept = default; + + ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=( + const ThrowingAllocator& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=(ThrowingAllocator&& other) noexcept { + dummy_ = std::move(other.State()); + return *this; + } + + template + struct rebind { + using other = ThrowingAllocator; + }; + + pointer allocate(size_type n) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return static_cast(::operator new(n * sizeof(T))); + } + + pointer allocate(size_type n, const_void_pointer) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + return allocate(n); + } + + void deallocate(pointer ptr, size_type) noexcept { + ReadState(); + ::operator delete(static_cast(ptr)); + } + + template + void construct(U* ptr, Args&&... args) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + ::new (static_cast(ptr)) U(std::forward(args)...); + } + + template + void destroy(U* p) noexcept { + ReadState(); + p->~U(); + } + + size_type max_size() const noexcept { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + ThrowingAllocator select_on_container_copy_construction() noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return *this; + } + + template + bool operator==(const ThrowingAllocator& other) const noexcept { + return dummy_ == other.dummy_; + } + + template + bool operator!=(const ThrowingAllocator& other) const noexcept { + return dummy_ != other.dummy_; + } + + template + friend class ThrowingAllocator; + + private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingAllocator<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + const std::shared_ptr& State() const { return dummy_; } + std::shared_ptr& State() { return dummy_; } + + void ReadState() { + // we know that this will never be true, but the compiler doesn't, so this + // should safely force a read of the value. + if (*dummy_ < 0) std::abort(); + } + + void ReadStateAndMaybeThrow(absl::string_view msg) const { + if (!IsSpecified(AllocSpec::kNoThrowAllocate)) { + exceptions_internal::MaybeThrow( + absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg)); + } + } + + static int next_id_; + std::shared_ptr dummy_; +}; + +template +int ThrowingAllocator::next_id_ = 0; + +// Tests for resource leaks by attempting to construct a T using args repeatedly +// until successful, using the countdown method. Side effects can then be +// tested for resource leaks. +template +void TestThrowingCtor(Args&&... 
args) {
+  struct Cleanup {
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  for (int count = 0;; ++count) {
+    exceptions_internal::ConstructorTracker ct(count);
+    exceptions_internal::SetCountdown(count);
+    try {
+      T temp(std::forward<Args>(args)...);
+      static_cast<void>(temp);
+      break;
+    } catch (const exceptions_internal::TestException&) {
+    }
+  }
+}
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation& operation) {
+  struct Cleanup {
+    Cleanup() { exceptions_internal::SetCountdown(); }
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  try {
+    operation();
+    return testing::AssertionSuccess();
+  } catch (const exceptions_internal::TestException&) {
+    return testing::AssertionFailure()
+           << "TestException thrown during call to operation() when nothrow "
+              "guarantee was expected.";
+  } catch (...) {
+    return testing::AssertionFailure()
+           << "Unknown exception thrown during call to operation() when "
+              "nothrow guarantee was expected.";
+  }
+}
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+ public:
+  explicit DefaultFactory(const T& t) : t_(t) {}
+  std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+ private:
+  T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+          typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+    LazyContractsCount != 0 &&
+    !std::is_same<LazyFactory, UninitializedT>::value &&
+    !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+          typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+}  // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
+
+namespace exceptions_internal {
+template <typename T>
+struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory>
+struct FactoryPtrTypeHelper {
+  using type = decltype(std::declval<const Factory&>()());
+
+  static_assert(IsUniquePtr<type>::value,
+                "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T>
+class ExceptionSafetyTest {
+  using Factory = std::function<std::unique_ptr<T>()>;
+  using Operation = std::function<void(T*)>;
+  using Contract = std::function<AssertionResult(T*)>;
+
+ public:
+  template <typename... Contracts>
+  explicit ExceptionSafetyTest(const Factory& f, const Operation& op,
+                               const Contracts&... contracts)
+      : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+  AssertionResult Test() const {
+    for (int count = 0;; ++count) {
+      exceptions_internal::ConstructorTracker ct(count);
+
+      for (const auto& contract : contracts_) {
+        auto t_ptr = factory_();
+        try {
+          SetCountdown(count);
+          operation_(t_ptr.get());
+          // Unset for the case that the operation throws no exceptions, which
+          // would leave the countdown set and break the *next* exception
+          // safety test after this one.
+          UnsetCountdown();
+          return AssertionSuccess();
+        } catch (const exceptions_internal::TestException& e) {
+          if (!contract(t_ptr.get())) {
+            return AssertionFailure() << e.what() << " failed contract check";
+          }
+        }
+      }
+    }
+  }
+
+ private:
+  template <typename ContractFn>
+  Contract WrapContract(const ContractFn& contract) {
+    return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); };
+  }
+
+  Contract WrapContract(StrongGuaranteeTagType) {
+    return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ *   tester.WithInitialValue(...)) must be invocable with the signature
+ *   `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ *   It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ *   or tester.Test(...)) must be invocable with the signature
+ *   `void operator()(T*) const` where T is the type being tested. It is used
+ *   for performing steps on a T instance that may throw and that need to be
+ *   checked for exception safety. Each call to the operation will receive a
+ *   fresh T instance so it's free to modify and destroy the T instances as it
+ *   pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ *   tester.WithContracts(...)) must be invocable with the signature
+ *   `testing::AssertionResult operator()(T*) const` where T is the type being
+ *   tested. Contract assertion callbacks are provided T instances post-throw.
+ *   They must return testing::AssertionSuccess when the type contracts of the
+ *   provided T instance hold. If the type contracts of the T instance do not
+ *   hold, they must return testing::AssertionFailure. Execution order of
+ *   Contracts... is unspecified. They will each individually get a fresh T
+ *   instance so they are free to modify and destroy the T instances as they
+ *   please.
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+ public:
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+   * on the provided T instance. The existing factory will not be included in
+   * the newly created tester instance. The created factory returns a new T
+   * instance by copy-constructing the provided const T& t.
+   *
+   * Preconditions for tester.WithInitialValue(const T& t):
+   *
+   * - The const T& t object must be copy-constructible where T is the type
+   *   being tested. For non-copy-constructible objects, use the method
+   *   tester.WithFactory(...).
+   */
+  template <typename T>
+  ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+  WithInitialValue(const T& t) const {
+    return WithFactory(DefaultFactory<T>(t));
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+   * included. The existing factory will not be included in the newly-created
+   * tester instance. This method is intended for use with types lacking a copy
+   * constructor. Types that can be copy-constructed should instead use the
+   * method tester.WithInitialValue(...).
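+   *
+   * A minimal sketch of WithFactory use (illustrative only; `MyType` here is
+   * a hypothetical move-only type, and factories must return a
+   * std::unique_ptr):
+   *
+   *   auto tester = testing::MakeExceptionSafetyTester().WithFactory(
+   *       [] { return absl::make_unique<MyType>(); });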
+ */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithFactory(const NewFactory& new_factory) const { + return {new_factory, operation_, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided testable + * operation included. The existing operation will not be included in the + * newly created tester. + */ + template + ExceptionSafetyTestBuilder, Contracts...> + WithOperation(const NewOperation& new_operation) const { + return {factory_, new_operation, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... + * combined with the Contracts... that were already included in the instance + * on which the method was called. Contracts... cannot be removed or replaced + * once added to an ExceptionSafetyTestBuilder instance. A fresh object must + * be created in order to get an empty Contracts... list. + * + * In addition to passing in custom contract assertion callbacks, this method + * accepts `testing::strong_guarantee` as an argument which checks T instances + * post-throw against freshly created T instances via operator== to verify + * that any state changes made during the execution of the operation were + * properly rolled back. + */ + template + ExceptionSafetyTestBuilder...> + WithContracts(const MoreContracts&... more_contracts) const { + return { + factory_, operation_, + std::tuple_cat(contracts_, std::tuple...>( + more_contracts...))}; + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * The passed-in testable operation will not be saved in a new tester instance + * nor will it modify/replace the existing tester instance. This is useful + * when each operation being tested is unique and does not need to be reused. + * + * Preconditions for tester.Test(const NewOperation& new_operation): + * + * - May only be called after at least one contract assertion callback and a + * factory or initial value have been provided. + */ + template < + typename NewOperation, + typename = EnableIfTestable> + testing::AssertionResult Test(const NewOperation& new_operation) const { + return TestImpl(new_operation, absl::index_sequence_for()); + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * Preconditions for tester.Test(): + * + * - May only be called after at least one contract assertion callback, a + * factory or initial value and a testable operation have been provided. 
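+   *
+   * Putting the pieces together, a minimal sketch (illustrative only;
+   * `MyType` is a hypothetical copyable, equality-comparable type with a
+   * hypothetical mutating member `Mutate()`):
+   *
+   *   EXPECT_TRUE(testing::MakeExceptionSafetyTester()
+   *                   .WithInitialValue(MyType{})
+   *                   .WithContracts(testing::strong_guarantee)
+   *                   .Test([](MyType* t) { t->Mutate(); }));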
+ */ + template < + typename LazyOperation = Operation, + typename = EnableIfTestable> + testing::AssertionResult Test() const { + return Test(operation_); + } + + private: + template + friend class ExceptionSafetyTestBuilder; + + friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); + + ExceptionSafetyTestBuilder() {} + + ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, + const std::tuple& i) + : factory_(f), operation_(o), contracts_(i) {} + + template + testing::AssertionResult TestImpl(SelectedOperation selected_operation, + absl::index_sequence) const { + return ExceptionSafetyTest>( + factory_, selected_operation, std::get(contracts_)...) + .Test(); + } + + Factory factory_; + Operation operation_; + std::tuple contracts_; +}; + +} // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ diff --git a/third_party/absl/absl/base/internal/exception_testing.h b/third_party/absl/absl/base/internal/exception_testing.h new file mode 100644 index 000000000..01b546557 --- /dev/null +++ b/third_party/absl/absl/base/internal/exception_testing.h @@ -0,0 +1,42 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Testing utilities for ABSL types which throw exceptions. + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception +// if exceptions are enabled, or for death with a specified text in the error +// message +#ifdef ABSL_HAVE_EXCEPTIONS + +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_THROW(expr, exception_t) + +#elif defined(__ANDROID__) +// Android asserts do not log anywhere that gtest can currently inspect. +// So we expect exit, but cannot match the message. +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH(expr, ".*") +#else +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH_IF_SUPPORTED(expr, text) + +#endif + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ diff --git a/third_party/absl/absl/base/internal/fast_type_id.h b/third_party/absl/absl/base/internal/fast_type_id.h new file mode 100644 index 000000000..a547b3a8b --- /dev/null +++ b/third_party/absl/absl/base/internal/fast_type_id.h @@ -0,0 +1,50 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type>
+struct FastTypeTag {
+  constexpr static char dummy_var = 0;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+template <typename Type>
+constexpr char FastTypeTag<Type>::dummy_var;
+#endif
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for
+// the passed-in type. These are meant to be a good match for keys into maps
+// or straight up comparisons.
+using FastTypeIdType = const void*;
+
+template <typename Type>
+constexpr inline FastTypeIdType FastTypeId() {
+  return &FastTypeTag<Type>::dummy_var;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
diff --git a/third_party/absl/absl/base/internal/fast_type_id_test.cc b/third_party/absl/absl/base/internal/fast_type_id_test.cc
new file mode 100644
index 000000000..16f3c1458
--- /dev/null
+++ b/third_party/absl/absl/base/internal/fast_type_id_test.cc
@@ -0,0 +1,123 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "absl/base/internal/fast_type_id.h" + +#include +#include +#include + +#include "gtest/gtest.h" + +namespace { +namespace bi = absl::base_internal; + +// NOLINTNEXTLINE +#define PRIM_TYPES(A) \ + A(bool) \ + A(short) \ + A(unsigned short) \ + A(int) \ + A(unsigned int) \ + A(long) \ + A(unsigned long) \ + A(long long) \ + A(unsigned long long) \ + A(float) \ + A(double) \ + A(long double) + +TEST(FastTypeIdTest, PrimitiveTypes) { + bi::FastTypeIdType type_ids[] = { +#define A(T) bi::FastTypeId(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId(), + PRIM_TYPES(A) +#undef A +#define A(T) bi::FastTypeId(), + PRIM_TYPES(A) +#undef A + }; + size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); + + for (int i = 0; i < total_type_ids; ++i) { + EXPECT_EQ(type_ids[i], type_ids[i]); + for (int j = 0; j < i; ++j) { + EXPECT_NE(type_ids[i], type_ids[j]); + } + } +} + +#define FIXED_WIDTH_TYPES(A) \ + A(int8_t) \ + A(uint8_t) \ + A(int16_t) \ + A(uint16_t) \ + A(int32_t) \ + A(uint32_t) \ + A(int64_t) \ + A(uint64_t) + +TEST(FastTypeIdTest, FixedWidthTypes) { + bi::FastTypeIdType type_ids[] = { +#define A(T) bi::FastTypeId(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId(), + FIXED_WIDTH_TYPES(A) +#undef A +#define A(T) bi::FastTypeId(), + FIXED_WIDTH_TYPES(A) +#undef A + }; + size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); + + for (int i = 0; i < total_type_ids; ++i) { + EXPECT_EQ(type_ids[i], type_ids[i]); + for (int j = 0; j < i; ++j) { + EXPECT_NE(type_ids[i], type_ids[j]); + } + } +} + +TEST(FastTypeIdTest, AliasTypes) { + using int_alias = int; + EXPECT_EQ(bi::FastTypeId(), bi::FastTypeId()); +} + +TEST(FastTypeIdTest, TemplateSpecializations) { + EXPECT_NE(bi::FastTypeId>(), + bi::FastTypeId>()); + + EXPECT_NE((bi::FastTypeId>()), + (bi::FastTypeId>())); +} + +struct Base {}; +struct Derived : Base {}; +struct PDerived : private Base {}; + +TEST(FastTypeIdTest, Inheritance) { + EXPECT_NE(bi::FastTypeId(), bi::FastTypeId()); + EXPECT_NE(bi::FastTypeId(), bi::FastTypeId()); +} + +} // namespace diff --git a/third_party/absl/absl/base/internal/hide_ptr.h b/third_party/absl/absl/base/internal/hide_ptr.h new file mode 100644 index 000000000..1dba80909 --- /dev/null +++ b/third_party/absl/absl/base/internal/hide_ptr.h @@ -0,0 +1,51 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_ +#define ABSL_BASE_INTERNAL_HIDE_PTR_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Arbitrary value with high bits set. Xor'ing with it is unlikely +// to map one valid pointer to another valid pointer. +constexpr uintptr_t HideMask() { + return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU; +} + +// Hide a pointer from the leak checker. 
For internal use only.
+// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <typename T>
+inline uintptr_t HidePtr(T* ptr) {
+  return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <typename T>
+inline T* UnhidePtr(uintptr_t hidden) {
+  return reinterpret_cast<T*>(hidden ^ HideMask());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_HIDE_PTR_H_
diff --git a/third_party/absl/absl/base/internal/identity.h b/third_party/absl/absl/base/internal/identity.h
new file mode 100644
index 000000000..365207b7e
--- /dev/null
+++ b/third_party/absl/absl/base/internal/identity.h
@@ -0,0 +1,39 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace internal {
+
+// This is a back-fill of C++20's `std::type_identity`.
+template <typename T>
+struct type_identity {
+  typedef T type;
+};
+
+// This is a back-fill of C++20's `std::type_identity_t`.
+template <typename T>
+using type_identity_t = typename type_identity<T>::type;
+
+}  // namespace internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_IDENTITY_H_
diff --git a/third_party/absl/absl/base/internal/inline_variable.h b/third_party/absl/absl/base/internal/inline_variable.h
new file mode 100644
index 000000000..09daf0f5b
--- /dev/null
+++ b/third_party/absl/absl/base/internal/inline_variable.h
@@ -0,0 +1,108 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
+
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+// File:
+//   This file defines a macro that allows the creation of or emulation of
+//   C++17 inline variables based on whether or not the feature is supported.
+
+////////////////////////////////////////////////////////////////////////////////
+// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
+//
+// Description:
+//   Expands to the equivalent of an inline constexpr instance of the specified
+//   `type` and `name`, initialized to the value `init`. If the compiler being
+//   used is detected as supporting actual inline variables as a language
+//   feature, then the macro expands to an actual inline variable definition.
+//
+// Requires:
+//   `type` is a type that is usable in an extern variable declaration.
+//
+// Requires: `name` is a valid identifier
+//
+// Requires:
+//   `init` is an expression that can be used in the following definition:
+//     constexpr type name = init;
+//
+// Usage:
+//
+//   // Equivalent to: `inline constexpr size_t variant_npos = -1;`
+//   ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+//
+// Differences in implementation:
+//   For a direct, language-level inline variable, decltype(name) will be the
+//   type that was specified along with const qualification, whereas for
+//   emulated inline variables, decltype(name) may be different (in practice
+//   it will likely be a reference type).
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __cpp_inline_variables
+
+// Clang's -Wmissing-variable-declarations option erroneously warned that
+// inline constexpr objects need to be pre-declared. This has now been fixed,
+// but we will need to support this workaround for people building with older
+// versions of clang.
+//
+// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
+//
+// Note:
+//   type_identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc..
+#if defined(__clang__)
+#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
+  extern const ::absl::internal::type_identity_t<type> name;
+#else  // Otherwise, just define the macro to do nothing.
+#define ABSL_INTERNAL_EXTERN_DECL(type, name)
+#endif  // defined(__clang__)
+
+// See above comment at top of file for details.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
+  ABSL_INTERNAL_EXTERN_DECL(type, name)                  \
+  inline constexpr ::absl::internal::type_identity_t<type> name = init
+
+#else
+
+// See above comment at top of file for details.
+//
+// Note:
+//   type_identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc..
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init)                 \
+  template <class /*AbslInternalDummy*/ = void>                              \
+  struct AbslInternalInlineVariableHolder##name {                            \
+    static constexpr ::absl::internal::type_identity_t<var_type> kInstance = \
+        init;                                                                \
+  };                                                                         \
+                                                                             \
+  template <class AbslInternalDummy>                                         \
+  constexpr ::absl::internal::type_identity_t<var_type>                      \
+      AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance;  \
+                                                                             \
+  static constexpr const ::absl::internal::type_identity_t<var_type>&        \
+      name = /* NOLINT */                                                    \
+      AbslInternalInlineVariableHolder##name<>::kInstance;                   \
+  static_assert(sizeof(void (*)(decltype(name))) != 0,                       \
+                "Silence unused variable warnings.")
+
+#endif  // __cpp_inline_variables
+
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
diff --git a/third_party/absl/absl/base/internal/inline_variable_testing.h b/third_party/absl/absl/base/internal/inline_variable_testing.h
new file mode 100644
index 000000000..f3c81459f
--- /dev/null
+++ b/third_party/absl/absl/base/internal/inline_variable_testing.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ +#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ + +#include "absl/base/internal/inline_variable.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +struct Foo { + int value = 5; +}; + +ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {}); +ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {}); + +ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5); +ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5); + +ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr); + +const Foo& get_foo_a(); +const Foo& get_foo_b(); + +const int& get_int_a(); +const int& get_int_b(); + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_ diff --git a/third_party/absl/absl/base/internal/invoke.h b/third_party/absl/absl/base/internal/invoke.h new file mode 100644 index 000000000..643c2a42f --- /dev/null +++ b/third_party/absl/absl/base/internal/invoke.h @@ -0,0 +1,241 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// absl::base_internal::invoke(f, args...) is an implementation of +// INVOKE(f, args...) from section [func.require] of the C++ standard. +// When compiled as C++17 and later versions, it is implemented as an alias of +// std::invoke. +// +// [func.require] +// Define INVOKE (f, t1, t2, ..., tN) as follows: +// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T; +// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item; +// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T; +// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item; +// 5. f(t1, t2, ..., tN) in all other cases. +// +// The implementation is SFINAE-friendly: substitution failure within invoke() +// isn't an error. 
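+//
+// For illustration only (not part of the upstream header), with a
+// hypothetical struct S, each clause resolves as follows:
+//
+//   struct S { int v; int get() const { return v; } };
+//   S s{42};
+//   absl::base_internal::invoke(&S::get, s);   // clause 1: (s.*f)()
+//   absl::base_internal::invoke(&S::get, &s);  // clause 2: ((*p).*f)()
+//   absl::base_internal::invoke(&S::v, s);     // clause 3: s.*f
+//   absl::base_internal::invoke(&S::v, &s);    // clause 4: (*p).*f
+//   absl::base_internal::invoke([](int x) { return x; }, 42);  // clause 5
+//
+// (each of the calls above evaluates to 42)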
+ +#ifndef ABSL_BASE_INTERNAL_INVOKE_H_ +#define ABSL_BASE_INTERNAL_INVOKE_H_ + +#include "absl/base/config.h" + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#include + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +using std::invoke; +using std::invoke_result_t; +using std::is_invocable_r; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#include +#include +#include + +#include "absl/meta/type_traits.h" + +// The following code is internal implementation detail. See the comment at the +// top of this file for the API documentation. + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// The five classes below each implement one of the clauses from the definition +// of INVOKE. The inner class template Accept checks whether the +// clause is applicable; static function template Invoke(f, args...) does the +// invocation. +// +// By separating the clause selection logic from invocation we make sure that +// Invoke() does exactly what the standard says. + +template +struct StrippedAccept { + template + struct Accept : Derived::template AcceptImpl::type>::type...> {}; +}; + +// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T. +struct MemFunAndRef : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + absl::is_function::value> { + }; + + template + static decltype((std::declval().* + std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { +// Ignore bogus GCC warnings on this line. +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return (std::forward(obj).* + std::forward(mem_fun))(std::forward(args)...); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic pop +#endif + } +}; + +// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item. +struct MemFunAndPtr : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + absl::is_function::value> { + }; + + template + static decltype(((*std::declval()).* + std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) { + return ((*std::forward(ptr)).* + std::forward(mem_fun))(std::forward(args)...); + } +}; + +// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T. +struct DataMemAndRef : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + !absl::is_function::value> {}; + + template + static decltype(std::declval().*std::declval()) Invoke( + DataMem&& data_mem, Ref&& ref) { + return std::forward(ref).*std::forward(data_mem); + } +}; + +// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item. 
+struct DataMemAndPtr : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + !absl::is_function::value> {}; + + template + static decltype((*std::declval()).*std::declval()) Invoke( + DataMem&& data_mem, Ptr&& ptr) { + return (*std::forward(ptr)).*std::forward(data_mem); + } +}; + +// f(t1, t2, ..., tN) in all other cases. +struct Callable { + // Callable doesn't have Accept because it's the last clause that gets picked + // when none of the previous clauses are applicable. + template + static decltype(std::declval()(std::declval()...)) Invoke( + F&& f, Args&&... args) { + return std::forward(f)(std::forward(args)...); + } +}; + +// Resolves to the first matching clause. +template +struct Invoker { + typedef typename std::conditional< + MemFunAndRef::Accept::value, MemFunAndRef, + typename std::conditional< + MemFunAndPtr::Accept::value, MemFunAndPtr, + typename std::conditional< + DataMemAndRef::Accept::value, DataMemAndRef, + typename std::conditional::value, + DataMemAndPtr, Callable>::type>::type>:: + type>::type type; +}; + +// The result type of Invoke. +template +using invoke_result_t = decltype(Invoker::type::Invoke( + std::declval(), std::declval()...)); + +// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section +// [func.require] of the C++ standard. +template +invoke_result_t invoke(F&& f, Args&&... args) { + return Invoker::type::Invoke(std::forward(f), + std::forward(args)...); +} + +template +struct IsInvocableRImpl : std::false_type {}; + +template +struct IsInvocableRImpl< + absl::void_t >, R, F, + Args...> + : std::integral_constant< + bool, + std::is_convertible, + R>::value || + std::is_void::value> {}; + +// Type trait whose member `value` is true if invoking `F` with `Args` is valid, +// and either the return type is convertible to `R`, or `R` is void. +// C++11-compatible version of `std::is_invocable_r`. +template +using is_invocable_r = IsInvocableRImpl; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/third_party/absl/absl/base/internal/low_level_alloc.cc b/third_party/absl/absl/base/internal/low_level_alloc.cc new file mode 100644 index 000000000..a563f7b9f --- /dev/null +++ b/third_party/absl/absl/base/internal/low_level_alloc.cc @@ -0,0 +1,631 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A low-level allocator that can be used by other low-level +// modules without introducing dependency cycles. +// This allocator is slow and wasteful of memory; +// it should not be used when performance is key. 
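+
+// Illustrative use (a sketch, not part of the upstream file): callers pair
+// Alloc() with Free(), or AllocWithArena() with Free() for a caller-owned
+// arena.
+//
+//   void* p = absl::base_internal::LowLevelAlloc::Alloc(128);
+//   // ... use the 128-byte block ...
+//   absl::base_internal::LowLevelAlloc::Free(p);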
+ +#include "absl/base/internal/low_level_alloc.h" + +#include + +#include "absl/base/call_once.h" +#include "absl/base/config.h" +#include "absl/base/internal/direct_mmap.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/macros.h" +#include "absl/base/thread_annotations.h" + +// LowLevelAlloc requires that the platform support low-level +// allocation of virtual memory. Platforms lacking this cannot use +// LowLevelAlloc. +#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING + +#ifndef _WIN32 +#include +#include +#include +#include +#else +#include +#endif + +#ifdef __linux__ +#include +#endif + +#include + +#include +#include +#include +#include +#include // for placement-new + +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/spinlock.h" + +#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS) +#define MAP_ANONYMOUS MAP_ANON +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// A first-fit allocator with amortized logarithmic free() time. + +// --------------------------------------------------------------------------- +static const int kMaxLevel = 30; + +namespace { +// This struct describes one allocated block, or one free block. +struct AllocList { + struct Header { + // Size of entire region, including this field. Must be + // first. Valid in both allocated and unallocated blocks. + uintptr_t size; + + // kMagicAllocated or kMagicUnallocated xor this. + uintptr_t magic; + + // Pointer to parent arena. + LowLevelAlloc::Arena *arena; + + // Aligns regions to 0 mod 2*sizeof(void*). + void *dummy_for_alignment; + } header; + + // Next two fields: in unallocated blocks: freelist skiplist data + // in allocated blocks: overlaps with client data + + // Levels in skiplist used. + int levels; + + // Actually has levels elements. The AllocList node may not have room + // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels(). + AllocList *next[kMaxLevel]; +}; +} // namespace + +// --------------------------------------------------------------------------- +// A trivial skiplist implementation. This is used to keep the freelist +// in address order while taking only logarithmic time per insert and delete. + +// An integer approximation of log2(size/base) +// Requires size >= base. +static int IntLog2(size_t size, size_t base) { + int result = 0; + for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result) + result++; + } + // floor(size / 2**result) <= base < floor(size / 2**(result-1)) + // => log2(size/(base+1)) <= result < 1+log2(size/base) + // => result ~= log2(size/base) + return result; +} + +// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1. +static int Random(uint32_t *state) { + uint32_t r = *state; + int result = 1; + while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) { + result++; + } + *state = r; + return result; +} + +// Return a number of skiplist levels for a node of size bytes, where +// base is the minimum node size. Compute level=log2(size / base)+n +// where n is 1 if random is false and otherwise a random number generated with +// the standard distribution for a skiplist: See Random() above. +// Bigger nodes tend to have more skiplist levels due to the log2(size / base) +// term, so first-fit searches touch fewer nodes. 
"level" is clipped so +// level(level) > max_fit) level = static_cast(max_fit); + if (level > kMaxLevel - 1) level = kMaxLevel - 1; + ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level"); + return level; +} + +// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e. +// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater +// points to the last element at level i in the AllocList less than *e, or is +// head if no such element exists. +static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e, + AllocList **prev) { + AllocList *p = head; + for (int level = head->levels - 1; level >= 0; level--) { + for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) { + } + prev[level] = p; + } + return (head->levels == 0) ? nullptr : prev[0]->next[0]; +} + +// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch. +// Requires that e->levels be previously set by the caller (using +// LLA_SkiplistLevels()) +static void LLA_SkiplistInsert(AllocList *head, AllocList *e, + AllocList **prev) { + LLA_SkiplistSearch(head, e, prev); + for (; head->levels < e->levels; head->levels++) { // extend prev pointers + prev[head->levels] = head; // to all *e's levels + } + for (int i = 0; i != e->levels; i++) { // add element to list + e->next[i] = prev[i]->next[i]; + prev[i]->next[i] = e; + } +} + +// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch(). +// Requires that e->levels be previous set by the caller (using +// LLA_SkiplistLevels()) +static void LLA_SkiplistDelete(AllocList *head, AllocList *e, + AllocList **prev) { + AllocList *found = LLA_SkiplistSearch(head, e, prev); + ABSL_RAW_CHECK(e == found, "element not in freelist"); + for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) { + prev[i]->next[i] = e->next[i]; + } + while (head->levels > 0 && head->next[head->levels - 1] == nullptr) { + head->levels--; // reduce head->levels if level unused + } +} + +// --------------------------------------------------------------------------- +// Arena implementation + +// Metadata for an LowLevelAlloc arena instance. +struct LowLevelAlloc::Arena { + // Constructs an arena with the given LowLevelAlloc flags. + explicit Arena(uint32_t flags_value); + + base_internal::SpinLock mu; + // Head of free list, sorted by address + AllocList freelist ABSL_GUARDED_BY(mu); + // Count of allocated blocks + int32_t allocation_count ABSL_GUARDED_BY(mu); + // flags passed to NewArena + const uint32_t flags; + // Result of sysconf(_SC_PAGESIZE) + const size_t pagesize; + // Lowest power of two >= max(16, sizeof(AllocList)) + const size_t round_up; + // Smallest allocation block size + const size_t min_size; + // PRNG state + uint32_t random ABSL_GUARDED_BY(mu); +}; + +namespace { +// Static storage space for the lazily-constructed, default global arena +// instances. We require this space because the whole point of LowLevelAlloc +// is to avoid relying on malloc/new. 
+alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof( + LowLevelAlloc::Arena)]; +alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof( + LowLevelAlloc::Arena)]; +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +alignas( + LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage + [sizeof(LowLevelAlloc::Arena)]; +#endif + +// We must use LowLevelCallOnce here to construct the global arenas, rather than +// using function-level statics, to avoid recursively invoking the scheduler. +absl::once_flag create_globals_once; + +void CreateGlobalArenas() { + new (&default_arena_storage) + LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook); + new (&unhooked_arena_storage) LowLevelAlloc::Arena(0); +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + new (&unhooked_async_sig_safe_arena_storage) + LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe); +#endif +} + +// Returns a global arena that does not call into hooks. Used by NewArena() +// when kCallMallocHook is not set. +LowLevelAlloc::Arena *UnhookedArena() { + base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); + return reinterpret_cast(&unhooked_arena_storage); +} + +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING +// Returns a global arena that is async-signal safe. Used by NewArena() when +// kAsyncSignalSafe is set. +LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() { + base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); + return reinterpret_cast( + &unhooked_async_sig_safe_arena_storage); +} +#endif + +} // namespace + +// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends. +LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() { + base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); + return reinterpret_cast(&default_arena_storage); +} + +// magic numbers to identify allocated and unallocated blocks +static const uintptr_t kMagicAllocated = 0x4c833e95U; +static const uintptr_t kMagicUnallocated = ~kMagicAllocated; + +namespace { +class ABSL_SCOPED_LOCKABLE ArenaLock { + public: + explicit ArenaLock(LowLevelAlloc::Arena *arena) + ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu) + : arena_(arena) { +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { + sigset_t all; + sigfillset(&all); + mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0; + } +#endif + arena_->mu.Lock(); + } + ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); } + void Leave() ABSL_UNLOCK_FUNCTION() { + arena_->mu.Unlock(); +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if (mask_valid_) { + const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr); + if (err != 0) { + ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err); + } + } +#endif + left_ = true; + } + + private: + bool left_ = false; // whether left region +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + bool mask_valid_ = false; + sigset_t mask_; // old mask of blocked signals +#endif + LowLevelAlloc::Arena *arena_; + ArenaLock(const ArenaLock &) = delete; + ArenaLock &operator=(const ArenaLock &) = delete; +}; +} // namespace + +// create an appropriate magic number for an object at "ptr" +// "magic" should be kMagicAllocated or kMagicUnallocated +inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) { + return magic ^ reinterpret_cast(ptr); +} + +namespace { +size_t GetPageSize() { +#ifdef _WIN32 + SYSTEM_INFO system_info; + 
GetSystemInfo(&system_info); + return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity); +#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__) + return getpagesize(); +#else + return static_cast(sysconf(_SC_PAGESIZE)); +#endif +} + +size_t RoundedUpBlockSize() { + // Round up block sizes to a power of two close to the header size. + size_t round_up = 16; + while (round_up < sizeof(AllocList::Header)) { + round_up += round_up; + } + return round_up; +} + +} // namespace + +LowLevelAlloc::Arena::Arena(uint32_t flags_value) + : mu(base_internal::SCHEDULE_KERNEL_ONLY), + allocation_count(0), + flags(flags_value), + pagesize(GetPageSize()), + round_up(RoundedUpBlockSize()), + min_size(2 * round_up), + random(0) { + freelist.header.size = 0; + freelist.header.magic = Magic(kMagicUnallocated, &freelist.header); + freelist.header.arena = this; + freelist.levels = 0; + memset(freelist.next, 0, sizeof(freelist.next)); +} + +// L < meta_data_arena->mu +LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) { + Arena *meta_data_arena = DefaultArena(); +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { + meta_data_arena = UnhookedAsyncSigSafeArena(); + } else // NOLINT(readability/braces) +#endif + if ((flags & LowLevelAlloc::kCallMallocHook) == 0) { + meta_data_arena = UnhookedArena(); + } + Arena *result = + new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags); + return result; +} + +// L < arena->mu, L < arena->arena->mu +bool LowLevelAlloc::DeleteArena(Arena *arena) { + ABSL_RAW_CHECK( + arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(), + "may not delete default arena"); + ArenaLock section(arena); + if (arena->allocation_count != 0) { + section.Leave(); + return false; + } + while (arena->freelist.next[0] != nullptr) { + AllocList *region = arena->freelist.next[0]; + size_t size = region->header.size; + arena->freelist.next[0] = region->next[0]; + ABSL_RAW_CHECK( + region->header.magic == Magic(kMagicUnallocated, ®ion->header), + "bad magic number in DeleteArena()"); + ABSL_RAW_CHECK(region->header.arena == arena, + "bad arena pointer in DeleteArena()"); + ABSL_RAW_CHECK(size % arena->pagesize == 0, + "empty arena has non-page-aligned block size"); + ABSL_RAW_CHECK(reinterpret_cast(region) % arena->pagesize == 0, + "empty arena has non-page-aligned block"); + int munmap_result; +#ifdef _WIN32 + munmap_result = VirtualFree(region, 0, MEM_RELEASE); + ABSL_RAW_CHECK(munmap_result != 0, + "LowLevelAlloc::DeleteArena: VitualFree failed"); +#else +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) { + munmap_result = munmap(region, size); + } else { + munmap_result = base_internal::DirectMunmap(region, size); + } +#else + munmap_result = munmap(region, size); +#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + if (munmap_result != 0) { + ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d", + errno); + } +#endif // _WIN32 + } + section.Leave(); + arena->~Arena(); + Free(arena); + return true; +} + +// --------------------------------------------------------------------------- + +// Addition, checking for overflow. The intent is to die if an external client +// manages to push through a request that would cause arithmetic to fail. 
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) { + uintptr_t sum = a + b; + ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow"); + return sum; +} + +// Return value rounded up to next multiple of align. +// align must be a power of two. +static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) { + return CheckedAdd(addr, align - 1) & ~(align - 1); +} + +// Equivalent to "return prev->next[i]" but with sanity checking +// that the freelist is in the correct order, that it +// consists of regions marked "unallocated", and that no two regions +// are adjacent in memory (they should have been coalesced). +// L >= arena->mu +static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) { + ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()"); + AllocList *next = prev->next[i]; + if (next != nullptr) { + ABSL_RAW_CHECK( + next->header.magic == Magic(kMagicUnallocated, &next->header), + "bad magic number in Next()"); + ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()"); + if (prev != &arena->freelist) { + ABSL_RAW_CHECK(prev < next, "unordered freelist"); + ABSL_RAW_CHECK(reinterpret_cast(prev) + prev->header.size < + reinterpret_cast(next), + "malformed freelist"); + } + } + return next; +} + +// Coalesce list item "a" with its successor if they are adjacent. +static void Coalesce(AllocList *a) { + AllocList *n = a->next[0]; + if (n != nullptr && reinterpret_cast(a) + a->header.size == + reinterpret_cast(n)) { + LowLevelAlloc::Arena *arena = a->header.arena; + a->header.size += n->header.size; + n->header.magic = 0; + n->header.arena = nullptr; + AllocList *prev[kMaxLevel]; + LLA_SkiplistDelete(&arena->freelist, n, prev); + LLA_SkiplistDelete(&arena->freelist, a, prev); + a->levels = + LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random); + LLA_SkiplistInsert(&arena->freelist, a, prev); + } +} + +// Adds block at location "v" to the free list +// L >= arena->mu +static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) { + AllocList *f = reinterpret_cast(reinterpret_cast(v) - + sizeof(f->header)); + ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), + "bad magic number in AddToFreelist()"); + ABSL_RAW_CHECK(f->header.arena == arena, + "bad arena pointer in AddToFreelist()"); + f->levels = + LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random); + AllocList *prev[kMaxLevel]; + LLA_SkiplistInsert(&arena->freelist, f, prev); + f->header.magic = Magic(kMagicUnallocated, &f->header); + Coalesce(f); // maybe coalesce with successor + Coalesce(prev[0]); // maybe coalesce with predecessor +} + +// Frees storage allocated by LowLevelAlloc::Alloc(). 
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+  if (v != nullptr) {
+    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+                                                 sizeof(f->header));
+    LowLevelAlloc::Arena *arena = f->header.arena;
+    ArenaLock section(arena);
+    AddToFreelist(v, arena);
+    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+    arena->allocation_count--;
+    section.Leave();
+  }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+  void *result = nullptr;
+  if (request != 0) {
+    AllocList *s;  // will point to region that satisfies request
+    ArenaLock section(arena);
+    // round up with header
+    size_t req_rnd =
+        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
+    for (;;) {  // loop until we find a suitable region
+      // find the minimum levels that a block of this size must have
+      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+      if (i < arena->freelist.levels) {    // potential blocks exist
+        AllocList *before = &arena->freelist;  // predecessor of s
+        while ((s = Next(i, before, arena)) != nullptr &&
+               s->header.size < req_rnd) {
+          before = s;
+        }
+        if (s != nullptr) {  // we found a region
+          break;
+        }
+      }
+      // we unlock before mmap() both because mmap() may call a callback hook,
+      // and because it may be slow.
+      arena->mu.Unlock();
+      // mmap generous 64K chunks to decrease
+      // the chances/impact of fragmentation:
+      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+      void *new_pages;
+#ifdef _WIN32
+      new_pages = VirtualAlloc(nullptr, new_pages_size,
+                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+      }
+#else
+      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if (new_pages == MAP_FAILED) {
+        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+      }
+
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+      // Attempt to name the allocated address range in /proc/$PID/smaps on
+      // Linux.
+      //
+      // This invocation of prctl() may fail if the Linux kernel was not
+      // configured with the CONFIG_ANON_VMA_NAME option.  This is OK since
+      // the naming of arenas is primarily a debugging aid.
+      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
+            "absl");
+#endif
+#endif  // __linux__
+#endif  // _WIN32
+      arena->mu.Lock();
+      s = reinterpret_cast<AllocList *>(new_pages);
+      s->header.size = new_pages_size;
+      // Pretend the block is allocated; call AddToFreelist() to free it.
+      s->header.magic = Magic(kMagicAllocated, &s->header);
+      s->header.arena = arena;
+      AddToFreelist(&s->levels, arena);  // insert new region into free list
+    }
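+    // Editorial worked example (assuming a 64-bit build where the block
+    // header is 32 bytes, so round_up == 32, min_size == 64, and the page
+    // size is 4096):
+    //   request        = 1000
+    //   req_rnd        = RoundUp(1000 + 32, 32)   == 1056
+    //   new_pages_size = RoundUp(1056, 4096 * 16) == 65536
+    // The freshly mapped 65536-byte region is freelisted by the loop above,
+    // found by the search, and split below since 1056 + 64 <= 65536.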
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
+    // s points to the first free region that's big enough
+    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+      // big enough to split
+      AllocList *n =
+          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
+      n->header.size = s->header.size - req_rnd;
+      n->header.magic = Magic(kMagicAllocated, &n->header);
+      n->header.arena = arena;
+      s->header.size = req_rnd;
+      AddToFreelist(&n->levels, arena);
+    }
+    s->header.magic = Magic(kMagicAllocated, &s->header);
+    ABSL_RAW_CHECK(s->header.arena == arena, "");
+    arena->allocation_count++;
+    section.Leave();
+    result = &s->levels;
+  }
+  ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+  return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+  void *result = DoAllocWithArena(request, DefaultArena());
+  return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  void *result = DoAllocWithArena(request, arena);
+  return result;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/third_party/absl/absl/base/internal/low_level_alloc.h b/third_party/absl/absl/base/internal/low_level_alloc.h
new file mode 100644
index 000000000..c2f1f25d8
--- /dev/null
+++ b/third_party/absl/absl/base/internal/low_level_alloc.h
@@ -0,0 +1,127 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data.  It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
+// asm.js / WebAssembly.
+// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+// for more information.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) || \
+    defined(__hexagon__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <stddef.h>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;  // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specific arena.
+  // For Alloc(), the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc().  Does nothing if passed 0.  "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc().  The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
+  // are to put all callers of MallocHook::Invoke* in this module
+  // into special section,
+  // so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in the
+  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+  // These values may be OR-ed into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe.  Not set in
+    // DefaultArena().  Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+  };
+  // Construct a new arena.  The allocation of the underlying metadata honors
+  // the provided flags.  For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe, as well as generating an arena that provides
+  // async-signal-safe Alloc/Free.
+  static Arena *NewArena(uint32_t flags);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
+
+ private:
+  LowLevelAlloc();  // no instances
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
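The flags documented above shape both the new arena and the metadata allocation that backs it. As a usage sketch (editorial; not part of the vendored sources), code that must allocate from a signal handler could set up an arena like this where the platform supports it:

    #include "absl/base/internal/low_level_alloc.h"

    using absl::base_internal::LowLevelAlloc;

    LowLevelAlloc::Arena* MakeSignalSafeArena() {
    #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      // Alloc()/Free() on this arena bypass malloc hooks and use direct mmap.
      return LowLevelAlloc::NewArena(LowLevelAlloc::kAsyncSignalSafe);
    #else
      return LowLevelAlloc::NewArena(0);  // plain arena fallback
    #endif
    }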
diff --git a/third_party/absl/absl/base/internal/low_level_alloc_test.cc b/third_party/absl/absl/base/internal/low_level_alloc_test.cc
new file mode 100644
index 000000000..8fdec09e8
--- /dev/null
+++ b/third_party/absl/absl/base/internal/low_level_alloc_test.cc
@@ -0,0 +1,180 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_map>
+#include <utility>
+
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
+#include "absl/container/node_hash_map.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+// This test doesn't use gtest since it needs to test that everything
+// works before main().
+#define TEST_ASSERT(x)                                           \
+  if (!(x)) {                                                    \
+    printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
+    abort();                                                     \
+  }
+
+// a block of memory obtained from the allocator
+struct BlockDesc {
+  char *ptr;  // pointer to memory
+  int len;    // number of bytes
+  int fill;   // filled with data starting with this
+};
+
+// Check that the pattern placed in the block d
+// by RandomizeBlockDesc is still there.
+static void CheckBlockDesc(const BlockDesc &d) {
+  for (int i = 0; i != d.len; i++) {
+    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
+  }
+}
+
+// Fill the block "*d" with a pattern
+// starting with a random byte.
+static void RandomizeBlockDesc(BlockDesc *d) {
+  d->fill = rand() & 0xff;
+  for (int i = 0; i != d->len; i++) {
+    d->ptr[i] = (d->fill + i) & 0xff;
+  }
+}
+
+// Use to indicate to the malloc hooks that
+// this call is from LowLevelAlloc.
+static bool using_low_level_alloc = false;
+
+// n times, toss a coin, and based on the outcome
+// either allocate a new block or deallocate an old block.
+// New blocks are placed in a std::unordered_map with a random key
+// and initialized with RandomizeBlockDesc().
+// If keys conflict, the older block is freed.
+// Old blocks are always checked with CheckBlockDesc()
+// before being freed.  At the end of the run,
+// all remaining allocated blocks are freed.
+// If use_new_arena is true, use a fresh arena, and then delete it.
+// If call_malloc_hook is true and use_new_arena is true,
+// allocations and deallocations are reported via the MallocHook
+// interface.
+static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
+  typedef absl::node_hash_map<int, BlockDesc> AllocMap;
+  AllocMap allocated;
+  AllocMap::iterator it;
+  BlockDesc block_desc;
+  int rnd;
+  LowLevelAlloc::Arena *arena = nullptr;
+  if (use_new_arena) {
+    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
+    arena = LowLevelAlloc::NewArena(flags);
+  }
+  for (int i = 0; i != n; i++) {
+    if (i != 0 && i % 10000 == 0) {
+      printf(".");
+      fflush(stdout);
+    }
+
+    switch (rand() & 1) {  // toss a coin
+      case 0:  // coin came up heads: add a block
+        using_low_level_alloc = true;
+        block_desc.len = rand() & 0x3fff;
+        block_desc.ptr = reinterpret_cast<char *>(
+            arena == nullptr
+                ? LowLevelAlloc::Alloc(block_desc.len)
+                : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+        using_low_level_alloc = false;
+        RandomizeBlockDesc(&block_desc);
+        rnd = rand();
+        it = allocated.find(rnd);
+        if (it != allocated.end()) {
+          CheckBlockDesc(it->second);
+          using_low_level_alloc = true;
+          LowLevelAlloc::Free(it->second.ptr);
+          using_low_level_alloc = false;
+          it->second = block_desc;
+        } else {
+          allocated[rnd] = block_desc;
+        }
+        break;
+      case 1:  // coin came up tails: remove a block
+        it = allocated.begin();
+        if (it != allocated.end()) {
+          CheckBlockDesc(it->second);
+          using_low_level_alloc = true;
+          LowLevelAlloc::Free(it->second.ptr);
+          using_low_level_alloc = false;
+          allocated.erase(it);
+        }
+        break;
+    }
+  }
+  // remove all remaining blocks
+  while ((it = allocated.begin()) != allocated.end()) {
+    CheckBlockDesc(it->second);
+    using_low_level_alloc = true;
+    LowLevelAlloc::Free(it->second.ptr);
+    using_low_level_alloc = false;
+    allocated.erase(it);
+  }
+  if (use_new_arena) {
+    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
+  }
+}
+
+// LowLevelAlloc is designed to be safe to call before main().
+static struct BeforeMain {
+  BeforeMain() {
+    Test(false, false, 50000);
+    Test(true, false, 50000);
+    Test(true, true, 50000);
+  }
+} before_main;
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+int main(int argc, char *argv[]) {
+  // The actual test runs in the global constructor of `before_main`.
+  printf("PASS\n");
+#ifdef __EMSCRIPTEN__
+  // clang-format off
+// This is JS here. Don't try to format it.
+  MAIN_THREAD_EM_ASM({
+    if (ENVIRONMENT_IS_WEB) {
+      if (typeof TEST_FINISH === 'function') {
+        TEST_FINISH($0);
+      } else {
+        console.error('Attempted to exit with status ' + $0);
+        console.error('But TEST_FINISH is not a function.');
+      }
+    }
+  }, 0);
+// clang-format on
+#endif
+  return 0;
+}
diff --git a/third_party/absl/absl/base/internal/low_level_scheduling.h b/third_party/absl/absl/base/internal/low_level_scheduling.h
new file mode 100644
index 000000000..9baccc065
--- /dev/null
+++ b/third_party/absl/absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,134 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+extern "C" bool __google_disable_rescheduling(void); +extern "C" void __google_enable_rescheduling(bool disable_result); + +namespace absl { +ABSL_NAMESPACE_BEGIN +class CondVar; +class Mutex; + +namespace synchronization_internal { +int MutexDelay(int32_t c, int mode); +} // namespace synchronization_internal + +namespace base_internal { + +class SchedulingHelper; // To allow use of SchedulingGuard. +class SpinLock; // To allow use of SchedulingGuard. + +// SchedulingGuard +// Provides guard semantics that may be used to disable cooperative rescheduling +// of the calling thread within specific program blocks. This is used to +// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative +// scheduling depends on. +// +// Domain implementations capable of rescheduling in reaction to involuntary +// kernel thread actions (e.g blocking due to a pagefault or syscall) must +// guarantee that an annotated thread is not allowed to (cooperatively) +// reschedule until the annotated region is complete. +// +// It is an error to attempt to use a cooperatively scheduled resource (e.g. +// Mutex) within a rescheduling-disabled region. +// +// All methods are async-signal safe. +class SchedulingGuard { + public: + // Returns true iff the calling thread may be cooperatively rescheduled. + static bool ReschedulingIsAllowed(); + SchedulingGuard(const SchedulingGuard&) = delete; + SchedulingGuard& operator=(const SchedulingGuard&) = delete; + + private: + // Disable cooperative rescheduling of the calling thread. It may still + // initiate scheduling operations (e.g. wake-ups), however, it may not itself + // reschedule. Nestable. The returned result is opaque, clients should not + // attempt to interpret it. + // REQUIRES: Result must be passed to a pairing EnableScheduling(). + static bool DisableRescheduling(); + + // Marks the end of a rescheduling disabled region, previously started by + // DisableRescheduling(). + // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling(). + static void EnableRescheduling(bool disable_result); + + // A scoped helper for {Disable, Enable}Rescheduling(). + // REQUIRES: destructor must run in same thread as constructor. + struct ScopedDisable { + ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); } + ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); } + + bool disabled; + }; + + // A scoped helper to enable rescheduling temporarily. + // REQUIRES: destructor must run in same thread as constructor. + class ScopedEnable { + public: + ScopedEnable(); + ~ScopedEnable(); + + private: + int scheduling_disabled_depth_; + }; + + // Access to SchedulingGuard is explicitly permitted. + friend class absl::CondVar; + friend class absl::Mutex; + friend class SchedulingHelper; + friend class SpinLock; + friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); +}; + +//------------------------------------------------------------------------------ +// End of public interfaces. 
+
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+  return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+  return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+  return;
+}
+
+inline SchedulingGuard::ScopedEnable::ScopedEnable()
+    : scheduling_disabled_depth_(0) {}
+inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
+  ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
diff --git a/third_party/absl/absl/base/internal/nullability_impl.h b/third_party/absl/absl/base/internal/nullability_impl.h
new file mode 100644
index 000000000..36e1b33d6
--- /dev/null
+++ b/third_party/absl/absl/base/internal/nullability_impl.h
@@ -0,0 +1,106 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+#define ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+
+#include <memory>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+namespace nullability_internal {
+
+// `IsNullabilityCompatible` checks whether its first argument is a class
+// explicitly tagged as supporting nullability annotations.  The tag is the
+// type declaration `absl_nullability_compatible`.
+template <typename T, typename = void>
+struct IsNullabilityCompatible : std::false_type {};
+
+template <typename T>
+struct IsNullabilityCompatible<
+    T, absl::void_t<typename T::absl_nullability_compatible>> : std::true_type {
+};
+
+template <typename T>
+constexpr bool IsSupportedType = IsNullabilityCompatible<T>::value;
+
+template <typename T>
+constexpr bool IsSupportedType<T*> = true;
+
+template <typename T, typename U>
+constexpr bool IsSupportedType<T U::*> = true;
+
+template <typename T, typename... Deleter>
+constexpr bool IsSupportedType<std::unique_ptr<T, Deleter...>> = true;
+
+template <typename T>
+constexpr bool IsSupportedType<std::shared_ptr<T>> = true;
+
+template <typename T>
+struct EnableNullable {
+  static_assert(nullability_internal::IsSupportedType<absl::remove_cvref_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNonnull {
+  static_assert(nullability_internal::IsSupportedType<absl::remove_cvref_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNullabilityUnknown {
+  static_assert(nullability_internal::IsSupportedType<absl::remove_cvref_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+// Note: we do not apply Clang nullability attributes (e.g. _Nullable).  These
+// only support raw pointers, and conditionally enabling them only for raw
+// pointers inhibits template arg deduction.  Ideally, they would support all
+// pointer-like types.
+template <typename T, typename = typename EnableNullable<T>::type>
+using NullableImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullable")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNonnull<T>::type>
+using NonnullImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nonnull")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNullabilityUnknown<T>::type>
+using NullabilityUnknownImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullability_Unspecified")]]
+#endif
+    = T;
+
+}  // namespace nullability_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
diff --git a/third_party/absl/absl/base/internal/per_thread_tls.h b/third_party/absl/absl/base/internal/per_thread_tls.h
new file mode 100644
index 000000000..cf5e97a04
--- /dev/null
+++ b/third_party/absl/absl/base/internal/per_thread_tls.h
@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+//
+// If the platform supports thread-local storage:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+//   thread-local variable
+// * ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is empty
+// * ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS
+
+#include "absl/base/port.h"  // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
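The keyword this header selects is meant to be dropped in front of an ordinary declaration. A small sketch (editorial; the variable name is invented):

    #include "absl/base/internal/per_thread_tls.h"

    // One instance per thread when ABSL_PER_THREAD_TLS is 1; otherwise the
    // keyword expands to nothing and this is a single shared global, so
    // callers must not assume per-thread isolation.
    ABSL_PER_THREAD_TLS_KEYWORD int per_thread_counter = 0;

    void BumpCounter() { ++per_thread_counter; }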
diff --git a/third_party/absl/absl/base/internal/pretty_function.h b/third_party/absl/absl/base/internal/pretty_function.h
new file mode 100644
index 000000000..35d51676d
--- /dev/null
+++ b/third_party/absl/absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function.  That
+// is, "main", not "int main()".  Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets.  ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
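To illustrate the difference the macro papers over (editorial sketch; the exact decorated strings vary by compiler):

    #include <cstdio>

    #include "absl/base/internal/pretty_function.h"

    int Add(int a, int b) {
      // __func__ yields "Add"; ABSL_PRETTY_FUNCTION yields something like
      // "int Add(int, int)" under GCC/Clang, or the __FUNCSIG__ form on MSVC.
      std::printf("%s vs %s\n", __func__, ABSL_PRETTY_FUNCTION);
      return a + b;
    }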
diff --git a/third_party/absl/absl/base/internal/raw_logging.cc b/third_party/absl/absl/base/internal/raw_logging.cc
new file mode 100644
index 000000000..d32b40a8b
--- /dev/null
+++ b/third_party/absl/absl/base/internal/raw_logging.cc
@@ -0,0 +1,280 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/raw_logging.h"
+
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+
+#ifdef __EMSCRIPTEN__
+#include <emscripten/console.h>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/errno_saver.h"
+#include "absl/base/log_severity.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows.  For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc.  If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(__hexagon__) || defined(__Fuchsia__) ||                     \
+    defined(__native_client__) || defined(__OpenBSD__) ||               \
+    defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+
+#include <unistd.h>
+
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+//   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_log_internal {
+namespace {
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+constexpr char kTruncated[] = " ... (message truncated)\n";
+
+// sprintf the format to the buffer, adjusting *buf and *size to reflect the
+// consumed bytes, and return whether the message fit without truncation.  If
+// truncation occurred, if possible leave room in the buffer for the message
+// kTruncated[].
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+    ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
+  if (*size < 0) return false;
+  int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
+  bool result = true;
+  if (n < 0 || n > *size) {
+    result = false;
+    if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+      n = *size - static_cast<int>(sizeof(kTruncated));
+    } else {
+      n = 0;  // no room for truncation message
+    }
+  }
+  *size -= n;
+  *buf += n;
+  return result;
+}
+#endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+constexpr int kLogBufSize = 3000;
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+
+// Helper for RawLog below.
+// *DoRawLog writes to *buf of *size and moves them past the written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+    ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+  if (*size < 0) return false;
+  va_list ap;
+  va_start(ap, format);
+  int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
+  va_end(ap);
+  if (n < 0 || n > *size) return false;
+  *size -= n;
+  *buf += n;
+  return true;
+}
+
+bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line,
+                               char** buf, int* buf_size) {
+  DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line);
+  return true;
+}
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<LogFilterAndPrefixHook>
+    log_filter_and_prefix_hook(DefaultLogFilterAndPrefix);
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<AbortHook> abort_hook;
+
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) {
+  char buffer[kLogBufSize];
+  char* buf = buffer;
+  int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  bool enabled = true;
+#else
+  bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+  if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+      severity < absl::LogSeverity::kFatal) {
+    enabled = false;
+  }
+#endif
+
+  enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size);
+  const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  if (enabled) {
+    bool no_chop = VADoRawLog(&buf, &size, format, ap);
+    if (no_chop) {
+      DoRawLog(&buf, &size, "\n");
+    } else {
+      DoRawLog(&buf, &size, "%s", kTruncated);
+    }
+    AsyncSignalSafeWriteError(buffer, strlen(buffer));
+  }
+#else
+  static_cast<void>(format);
+  static_cast<void>(ap);
+  static_cast<void>(enabled);
+#endif
+
+  // Abort the process after logging a FATAL message, even if the output itself
+  // was suppressed.
+  if (severity == absl::LogSeverity::kFatal) {
+    abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+    abort();
+  }
+}
+
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
+                        const std::string& message) {
+  RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+         message.data());
+}
+
+}  // namespace
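+// Editorial sketch: RegisterLogFilterAndPrefixHook() (defined further down in
+// this file) replaces DefaultLogFilterAndPrefix above.  A replacement hook
+// must advance *buf past any prefix it writes and return whether the message
+// should be emitted, along these lines (illustrative; a hook outside this
+// file would need its own formatting helper in place of DoRawLog):
+//
+//   bool MyFilterAndPrefix(absl::LogSeverity severity, const char* file,
+//                          int line, char** buf, int* buf_size) {
+//     DoRawLog(buf, buf_size, "myapp %s:%d: ", file, line);
+//     return severity >= absl::LogSeverity::kWarning;  // drop INFO messages
+//   }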
+
+void AsyncSignalSafeWriteError(const char* s, size_t len) {
+  if (!len) return;
+  absl::base_internal::ErrnoSaver errno_saver;
+#if defined(__EMSCRIPTEN__)
+  // In WebAssembly, bypass filesystem emulation via fwrite.
+  if (s[len - 1] == '\n') {
+    // Skip a trailing newline character as emscripten_errn adds one itself.
+    len--;
+  }
+  // emscripten_errn was introduced in 3.1.41 but broken in standalone mode
+  // until 3.1.43.
+#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
+  emscripten_errn(s, len);
+#else
+  char buf[kLogBufSize];
+  if (len >= kLogBufSize) {
+    len = kLogBufSize - 1;
+    constexpr size_t trunc_len = sizeof(kTruncated) - 2;
+    memcpy(buf + len - trunc_len, kTruncated, trunc_len);
+    buf[len] = '\0';
+    len -= trunc_len;
+  } else {
+    buf[len] = '\0';
+  }
+  memcpy(buf, s, len);
+  _emscripten_err(buf);
+#endif
+#elif defined(ABSL_HAVE_SYSCALL_WRITE)
+  // We prefer calling write via `syscall` to minimize the risk of libc doing
+  // something "helpful".
+  syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+  write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+  _write(/* stderr */ 2, s, static_cast<unsigned>(len));
+#else
+  // stderr logging unsupported on this platform
+  (void)s;
+  (void)len;
+#endif
+}
+
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  RawLogVA(severity, file, line, format, ap);
+  va_end(ap);
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return true;
+#else  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return false;
+#endif  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
+    absl::base_internal::AtomicHook<InternalLogFunction>
+        internal_log_function(DefaultInternalLog);
+
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) {
+  log_filter_and_prefix_hook.Store(func);
+}
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
+void RegisterInternalLogFunction(InternalLogFunction func) {
+  internal_log_function.Store(func);
+}
+
+}  // namespace raw_log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/internal/raw_logging.h b/third_party/absl/absl/base/internal/raw_logging.h
new file mode 100644
index 000000000..d7cfbc576
--- /dev/null
+++ b/third_party/absl/absl/base/internal/raw_logging.h
@@ -0,0 +1,217 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+//   memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+//   ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+//   E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+
+#define ABSL_RAW_LOG(severity, ...)
\ + do { \ + constexpr const char* absl_raw_log_internal_basename = \ + ::absl::raw_log_internal::Basename(__FILE__, sizeof(__FILE__) - 1); \ + ::absl::raw_log_internal::RawLog(ABSL_RAW_LOG_INTERNAL_##severity, \ + absl_raw_log_internal_basename, __LINE__, \ + __VA_ARGS__); \ + ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \ + } while (0) + +// Similar to CHECK(condition) << message, but for low-level modules: +// we use only ABSL_RAW_LOG that does not allocate memory. +// We do not want to provide args list here to encourage this usage: +// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args); +// so that the args are not computed when not needed. +#define ABSL_RAW_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ + } \ + } while (0) + +// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above, +// except that if the richer log library is linked into the binary, we dispatch +// to that instead. This is potentially useful for internal logging and +// assertions, where we are using RAW_LOG neither for its async-signal-safety +// nor for its non-allocating nature, but rather because raw logging has very +// few other dependencies. +// +// The API is a subset of the above: each macro only takes two arguments. Use +// StrCat if you need to build a richer message. +#define ABSL_INTERNAL_LOG(severity, message) \ + do { \ + constexpr const char* absl_raw_log_internal_filename = __FILE__; \ + ::absl::raw_log_internal::internal_log_function( \ + ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_filename, \ + __LINE__, message); \ + ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \ + } while (0) + +#define ABSL_INTERNAL_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + std::string death_message = "Check " #condition " failed: "; \ + death_message += std::string(message); \ + ABSL_INTERNAL_LOG(FATAL, death_message); \ + } \ + } while (0) + +#ifndef NDEBUG + +#define ABSL_RAW_DLOG(severity, ...) ABSL_RAW_LOG(severity, __VA_ARGS__) +#define ABSL_RAW_DCHECK(condition, message) ABSL_RAW_CHECK(condition, message) + +#else // NDEBUG + +#define ABSL_RAW_DLOG(severity, ...) \ + while (false) ABSL_RAW_LOG(severity, __VA_ARGS__) +#define ABSL_RAW_DCHECK(condition, message) \ + while (false) ABSL_RAW_CHECK(condition, message) + +#endif // NDEBUG + +#define ABSL_RAW_LOG_INTERNAL_INFO ::absl::LogSeverity::kInfo +#define ABSL_RAW_LOG_INTERNAL_WARNING ::absl::LogSeverity::kWarning +#define ABSL_RAW_LOG_INTERNAL_ERROR ::absl::LogSeverity::kError +#define ABSL_RAW_LOG_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOG_INTERNAL_DFATAL ::absl::kLogDebugFatal +#define ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \ + ::absl::NormalizeLogSeverity(severity) + +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_INFO +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_WARNING +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_ERROR +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_FATAL ABSL_UNREACHABLE() +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_DFATAL +#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_LEVEL(severity) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace raw_log_internal { + +// Helper function to implement ABSL_RAW_LOG +// Logs format... at "severity" level, reporting it +// as called from file:line. +// This does not allocate memory or acquire locks. 
+void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); + +// Writes the provided buffer directly to stderr, in a signal-safe, low-level +// manner. Preserves errno. +void AsyncSignalSafeWriteError(const char* s, size_t len); + +// compile-time function to get the "base" filename, that is, the part of +// a filename after the last "/" or "\" path separator. The search starts at +// the end of the string; the second parameter is the length of the string. +constexpr const char* Basename(const char* fname, int offset) { + return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' + ? fname + offset + : Basename(fname, offset - 1); +} + +// For testing only. +// Returns true if raw logging is fully supported. When it is not +// fully supported, no messages will be emitted, but a log at FATAL +// severity will cause an abort. +// +// TODO(gfalcon): Come up with a better name for this method. +bool RawLoggingFullySupported(); + +// Function type for a raw_log customization hook for suppressing messages +// by severity, and for writing custom prefixes on non-suppressed messages. +// +// The installed hook is called for every raw log invocation. The message will +// be logged to stderr only if the hook returns true. FATAL errors will cause +// the process to abort, even if writing to stderr is suppressed. The hook is +// also provided with an output buffer, where it can write a custom log message +// prefix. +// +// The raw_log system does not allocate memory or grab locks. User-provided +// hooks must avoid these operations, and must not throw exceptions. +// +// 'severity' is the severity level of the message being written. +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the +// hook writes a prefix, it must increment *buf and decrement *buf_size +// accordingly. +using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, + const char* file, int line, char** buf, + int* buf_size); + +// Function type for a raw_log customization hook called to abort a process +// when a FATAL message is logged. If the provided AbortHook() returns, the +// logging system will call abort(). +// +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// The NUL-terminated logged message lives in the buffer between 'buf_start' +// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the +// buffer (as written by the LogFilterAndPrefixHook.) +// +// The lifetime of the filename and message buffers will not end while the +// process remains alive. +using AbortHook = void (*)(const char* file, int line, const char* buf_start, + const char* prefix_end, const char* buf_end); + +// Internal logging function for ABSL_INTERNAL_LOG to dispatch to. +// +// TODO(gfalcon): When string_view no longer depends on base, change this +// interface to take its message as a string_view instead. +using InternalLogFunction = void (*)(absl::LogSeverity severity, + const char* file, int line, + const std::string& message); + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< + InternalLogFunction> + internal_log_function; + +// Registers hooks of the above types. Only a single hook of each type may be +// registered. It is an error to call these functions multiple times with +// different input arguments. 
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
+void RegisterAbortHook(AbortHook func);
+void RegisterInternalLogFunction(InternalLogFunction func);
+
+}  // namespace raw_log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
diff --git a/third_party/absl/absl/base/internal/scheduling_mode.h b/third_party/absl/absl/base/internal/scheduling_mode.h
new file mode 100644
index 000000000..8be5ab6dd
--- /dev/null
+++ b/third_party/absl/absl/base/internal/scheduling_mode.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled.  Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled.  This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type.  Host operating
+// system semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer
+// SCHEDULE_COOPERATIVE_AND_KERNEL by default.  SCHEDULE_KERNEL_ONLY should
+// only be used for resources on which base::scheduling (e.g. the
+// implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
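Both the Arena constructor earlier in this patch and SpinLock (later in this patch) take this enum at construction time. A usage sketch (editorial; not part of the vendored sources):

    #include "absl/base/internal/scheduling_mode.h"
    #include "absl/base/internal/spinlock.h"

    // A lock the scheduler implementation itself may depend on: waiters must
    // never reschedule cooperatively while trying to acquire it.
    absl::base_internal::SpinLock g_low_level_lock(
        absl::base_internal::SCHEDULE_KERNEL_ONLY);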
diff --git a/third_party/absl/absl/base/internal/scoped_set_env.cc b/third_party/absl/absl/base/internal/scoped_set_env.cc
new file mode 100644
index 000000000..8a934cb51
--- /dev/null
+++ b/third_party/absl/absl/base/internal/scoped_set_env.cc
@@ -0,0 +1,81 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/scoped_set_env.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <cstdlib>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#ifdef _WIN32
+const int kMaxEnvVarValueSize = 1024;
+#endif
+
+void SetEnvVar(const char* name, const char* value) {
+#ifdef _WIN32
+  SetEnvironmentVariableA(name, value);
+#else
+  if (value == nullptr) {
+    ::unsetenv(name);
+  } else {
+    ::setenv(name, value, 1);
+  }
+#endif
+}
+
+}  // namespace
+
+ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
+    : var_name_(var_name), was_unset_(false) {
+#ifdef _WIN32
+  char buf[kMaxEnvVarValueSize];
+  auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
+  ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
+
+  if (get_res == 0) {
+    was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
+  } else {
+    old_value_.assign(buf, get_res);
+  }
+
+  SetEnvironmentVariableA(var_name_.c_str(), new_value);
+#else
+  const char* val = ::getenv(var_name_.c_str());
+  if (val == nullptr) {
+    was_unset_ = true;
+  } else {
+    old_value_ = val;
+  }
+#endif
+
+  SetEnvVar(var_name_.c_str(), new_value);
+}
+
+ScopedSetEnv::~ScopedSetEnv() {
+  SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/internal/scoped_set_env.h b/third_party/absl/absl/base/internal/scoped_set_env.h
new file mode 100644
index 000000000..19ec7b5d8
--- /dev/null
+++ b/third_party/absl/absl/base/internal/scoped_set_env.h
@@ -0,0 +1,45 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ScopedSetEnv {
+ public:
+  ScopedSetEnv(const char* var_name, const char* new_value);
+  ~ScopedSetEnv();
+
+ private:
+  std::string var_name_;
+  std::string old_value_;
+
+  // True if the environment variable was initially not set.
+  bool was_unset_;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
diff --git a/third_party/absl/absl/base/internal/scoped_set_env_test.cc b/third_party/absl/absl/base/internal/scoped_set_env_test.cc
new file mode 100644
index 000000000..5cbad246c
--- /dev/null
+++ b/third_party/absl/absl/base/internal/scoped_set_env_test.cc
@@ -0,0 +1,99 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/scoped_set_env.h"
+
+namespace {
+
+using absl::base_internal::ScopedSetEnv;
+
+std::string GetEnvVar(const char* name) {
+#ifdef _WIN32
+  char buf[1024];
+  auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf));
+  if (get_res >= sizeof(buf)) {
+    return "TOO_BIG";
+  }
+
+  if (get_res == 0) {
+    return "UNSET";
+  }
+
+  return std::string(buf, get_res);
+#else
+  const char* val = ::getenv(name);
+  if (val == nullptr) {
+    return "UNSET";
+  }
+
+  return val;
+#endif
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToString) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToNull) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToString) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToNull) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+}  // namespace
diff --git a/third_party/absl/absl/base/internal/spinlock.cc b/third_party/absl/absl/base/internal/spinlock.cc
new file mode 100644
index 000000000..381b913b2
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock.cc
@@ -0,0 +1,232 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+#include "absl/base/call_once.h"
+
+// Description of lock-word:
+//  31..00: [............................3][2][1][0]
+//
+//     [0]: kSpinLockHeld
+//     [1]: kSpinLockCooperative
+//     [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+//          contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is exclusive from bit [1].  It is used only by a
+//          non-cooperative lock.  When set, indicates that scheduling was
+//          successfully disabled when the lock was acquired.  May be unset,
+//          even if non-cooperative, if a ThreadIdentity did not yet exist at
+//          time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
+//          acquired without contention, however, at least one waiter exists.
+//
+// Otherwise, bits [31..3] represent the time spent by the current lock
+// holder to acquire the lock.  There may be outstanding waiter(s).
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
+    const void *lock, int64_t wait_cycles)>
+    submit_profile_data;
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+                                         int64_t wait_cycles)) {
+  submit_profile_data.Store(fn);
+}
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+// Static member variable definitions.
+constexpr uint32_t SpinLock::kSpinLockHeld;
+constexpr uint32_t SpinLock::kSpinLockCooperative;
+constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
+constexpr uint32_t SpinLock::kSpinLockSleeper;
+constexpr uint32_t SpinLock::kWaitTimeMask;
+#endif
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations). The last value read from the lock
+// is returned from the method.
+uint32_t SpinLock::SpinLoop() {
+  // We are already in the slow path of SpinLock, initialize the
+  // adaptive_spin_count here.
+  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
+  ABSL_CONST_INIT static int adaptive_spin_count = 0;
+  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
+    adaptive_spin_count = base_internal::NumCPUs() > 1 ?
+  });
+
+  int c = adaptive_spin_count;
+  uint32_t lock_value;
+  do {
+    lock_value = lockword_.load(std::memory_order_relaxed);
+  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+  return lock_value;
+}
+
+void SpinLock::SlowLock() {
+  uint32_t lock_value = SpinLoop();
+  lock_value = TryLockInternal(lock_value, 0);
+  if ((lock_value & kSpinLockHeld) == 0) {
+    return;
+  }
+
+  base_internal::SchedulingMode scheduling_mode;
+  if ((lock_value & kSpinLockCooperative) != 0) {
+    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  } else {
+    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+  }
+
+  // The lock was not obtained initially, so this thread needs to wait for
+  // it.  Record the current timestamp in the local variable wait_start_time
+  // so the total wait time can be stored in the lockword once this thread
+  // obtains the lock.
+  int64_t wait_start_time = CycleClock::Now();
+  uint32_t wait_cycles = 0;
+  int lock_wait_call_count = 0;
+  while ((lock_value & kSpinLockHeld) != 0) {
+    // If the lock is currently held, but not marked as having a sleeper, mark
+    // it as having a sleeper.
+    if ((lock_value & kWaitTimeMask) == 0) {
+      // Here, just "mark" that the thread is going to sleep.  Don't store the
+      // lock wait time in the lock -- the lock word stores the amount of time
+      // that the current holder waited before acquiring the lock, not the wait
+      // time of any thread currently waiting to acquire it.
+      if (lockword_.compare_exchange_strong(
+              lock_value, lock_value | kSpinLockSleeper,
+              std::memory_order_relaxed, std::memory_order_relaxed)) {
+        // Successfully transitioned to kSpinLockSleeper.  Pass
+        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+        // the last lock_value observed.
+        lock_value |= kSpinLockSleeper;
+      } else if ((lock_value & kSpinLockHeld) == 0) {
+        // Lock is free again, so try and acquire it before sleeping.  The
+        // new lock state will be the number of cycles this thread waited if
+        // this thread obtains the lock.
+        lock_value = TryLockInternal(lock_value, wait_cycles);
+        continue;  // Skip the delay at the end of the loop.
+      } else if ((lock_value & kWaitTimeMask) == 0) {
+        // The lock is still held, without a waiter being marked, but something
+        // else about the lock word changed, causing our CAS to fail.  For
+        // example, a new lock holder may have acquired the lock with
+        // kSpinLockDisabledScheduling set, whereas the previous holder had not
+        // set that flag.  In this case, attempt again to mark ourselves as a
+        // waiter.
+        continue;
+      }
+    }
+
+    // SpinLockDelay() calls into fiber scheduler, we need to see
+    // synchronization there to avoid false positives.
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    // Wait for an OS specific delay.
+    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+                                 scheduling_mode);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+    // Spin again after returning from the wait routine to give this thread
+    // some chance of obtaining the lock.
+    lock_value = SpinLoop();
+    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
+    lock_value = TryLockInternal(lock_value, wait_cycles);
+  }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+  base_internal::SpinLockWake(&lockword_,
+                              false);  // wake waiter if necessary
+
+  // If our acquisition was contended, collect contentionz profile info.  We
+  // reserve a unitary wait time to represent that a waiter exists without our
+  // own acquisition having been contended.
+  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+    const int64_t wait_cycles = DecodeWaitCycles(lock_value);
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    submit_profile_data(this, wait_cycles);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+  }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock.  This is reported by contentionz profiling.  Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
+// sized units.  On a 4GHz machine this will lose track of wait times greater
+// than (2^29/4 GHz)*128 =~ 17.2 seconds.  Such waits should be extremely rare.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+                                    int64_t wait_end_time) {
+  static const int64_t kMaxWaitTime =
+      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
+  int64_t scaled_wait_time =
+      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
+
+  // Return a representation of the time spent waiting that can be stored in
+  // the lock word's upper bits.
+  uint32_t clamped = static_cast<uint32_t>(
+      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
+
+  if (clamped == 0) {
+    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
+  }
+  // Bump up value if necessary to avoid returning kSpinLockSleeper.
+  const uint32_t kMinWaitTime =
+      kSpinLockSleeper + (1 << kLockwordReservedShift);
+  if (clamped == kSpinLockSleeper) {
+    return kMinWaitTime;
+  }
+  return clamped;
+}
+
+int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+  // Cast to uint32_t first to ensure bits [63:32] are cleared.
+  const int64_t scaled_wait_time =
+      static_cast<int64_t>(lock_value & kWaitTimeMask);
+  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
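The `submit_profile_data` hook above is populated through `RegisterSpinLockProfiler()`. A minimal sketch of installing a reporter, assuming a simple stderr logger (the function names are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

#include "absl/base/internal/spinlock.h"

// Invoked when a contended SpinLock is released; wait_cycles is the value
// DecodeWaitCycles() recovers from the lock word.
void ReportContention(const void* lock, int64_t wait_cycles) {
  std::fprintf(stderr, "spinlock %p contended for %lld cycles\n", lock,
               static_cast<long long>(wait_cycles));
}

void InstallSpinLockProfiler() {
  // Only a single profiler may be registered process-wide.
  absl::base_internal::RegisterSpinLockProfiler(&ReportContention);
}
```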
diff --git a/third_party/absl/absl/base/internal/spinlock.h b/third_party/absl/absl/base/internal/spinlock.h
new file mode 100644
index 000000000..2929cd6f5
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock.h
@@ -0,0 +1,265 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Most users requiring mutual exclusion should use Mutex.
+// SpinLock is provided for use in two situations:
+//  - for use by Abseil internal code that Mutex itself depends on
+//  - for async signal safety (see below)
+
+// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async
+// signal safe. If a spinlock is used within a signal handler, all code that
+// acquires the lock must ensure that the signal cannot arrive while they are
+// holding the lock. Typically, this is done by blocking the signal.
+//
+// Threads waiting on a SpinLock may be woken in an arbitrary order.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/thread_annotations.h"
+
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+class AllocationGuardSpinLockHolder;
+
+}  // namespace tcmalloc_internal
+}  // namespace tcmalloc
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ABSL_LOCKABLE SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers.  Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+
+  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
+  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
+      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+
+  // For global SpinLock instances prefer trivial destructor when possible.
+  // Default but non-trivial destructor in some build configurations causes an
+  // extra static initializer.
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+#else
+  ~SpinLock() = default;
+#endif
+
+  // Acquire this SpinLock.
+  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful.  If the lock was not acquired, false is
+  // returned.  If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
+                                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+
+  // Determine if the lock is held.  When the lock is held by the invoking
+  // thread, true will always be returned.  Intended to be used as
+  // CHECK(lock.IsHeld()).
+  inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+  // Return immediately if this thread holds the SpinLock exclusively.
+  // Otherwise, report an error by crashing with a diagnostic.
+  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
+    if (!IsHeld()) {
+      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
+    }
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static int64_t DecodeWaitCycles(uint32_t lock_value);
+
+  // Provide access to protected method above.  Use for testing only.
+  friend struct SpinLockTest;
+  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether the current lock holder disabled scheduling when
+  //        acquiring the lock.  Only set when kSpinLockHeld is also set.
+  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  //        This is set by the lock holder to indicate how long it waited on
+  //        the lock before eventually acquiring it.  The number of cycles is
+  //        encoded as a 29-bit unsigned int, or in the case that the current
+  //        holder did not wait but another waiter is queued, the LSB
+  //        (kSpinLockSleeper) is set.  The implementation does not explicitly
+  //        track the number of queued waiters beyond this.  It must always be
+  //        assumed that waiters may exist if the current holder was required
+  //        to queue.
+  //
+  // Invariant: if the lock is not held, the value is either 0 or
+  // kSpinLockCooperative.
+  static constexpr uint32_t kSpinLockHeld = 1;
+  static constexpr uint32_t kSpinLockCooperative = 2;
+  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+  static constexpr uint32_t kSpinLockSleeper = 8;
+  // Includes kSpinLockSleeper.
+  static constexpr uint32_t kWaitTimeMask =
+      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
+
+  // Returns true if the provided scheduling mode is cooperative.
+  static constexpr bool IsCooperative(
+      base_internal::SchedulingMode scheduling_mode) {
+    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  }
+
+  bool IsCooperative() const {
+    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
+  }
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop();
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended.  The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles.  This is thread-safe, but only a single
+// profiler can be registered.  It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (!lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
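A minimal sketch of the intended call pattern, assuming a const-initialized global lock as described above (names are illustrative; ordinary code should still prefer Mutex):

```cpp
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/spinlock.h"

ABSL_CONST_INIT static absl::base_internal::SpinLock g_counter_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
static int g_counter = 0;

void Increment() {
  // SpinLockHolder acquires the lock for the enclosing scope (RAII).
  absl::base_internal::SpinLockHolder holder(&g_counter_lock);
  ++g_counter;
}
```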
diff --git a/third_party/absl/absl/base/internal/spinlock_akaros.inc b/third_party/absl/absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 000000000..7b0cada4f
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock.  Our callers assume we will not call into libraries or other
+  // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
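Besides the blocking Lock(), the TryLock()/Unlock() pair benchmarked below supports a drop-on-contention fast path; a minimal sketch (illustrative, not from the vendored sources):

```cpp
#include "absl/base/internal/spinlock.h"

// Returns false instead of blocking when the lock is contended, letting the
// caller drop a sample rather than stall.
bool TryRecordSample(absl::base_internal::SpinLock* lock, int* total,
                     int sample) {
  if (!lock->TryLock()) return false;
  *total += sample;
  lock->Unlock();
  return true;
}
```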
diff --git a/third_party/absl/absl/base/internal/spinlock_benchmark.cc b/third_party/absl/absl/base/internal/spinlock_benchmark.cc
new file mode 100644
index 000000000..1790d9678
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock_benchmark.cc
@@ -0,0 +1,80 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock
+// and Mutex performance under varying levels of contention.
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/no_destructor.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+template <absl::base_internal::SchedulingMode scheduling_mode>
+static void BM_TryLock(benchmark::State& state) {
+  // Ensure a ThreadIdentity is installed so that KERNEL_ONLY has an effect.
+  ABSL_INTERNAL_CHECK(
+      absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() !=
+          nullptr,
+      "GetOrCreateCurrentThreadIdentity() failed");
+
+  static absl::NoDestructor<absl::base_internal::SpinLock> spinlock(
+      scheduling_mode);
+  for (auto _ : state) {
+    if (spinlock->TryLock()) spinlock->Unlock();
+  }
+}
+
+template <absl::base_internal::SchedulingMode scheduling_mode>
+static void BM_SpinLock(benchmark::State& state) {
+  // Ensure a ThreadIdentity is installed so that KERNEL_ONLY has an effect.
+  ABSL_INTERNAL_CHECK(
+      absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() !=
+          nullptr,
+      "GetOrCreateCurrentThreadIdentity() failed");
+
+  static absl::NoDestructor<absl::base_internal::SpinLock> spinlock(
+      scheduling_mode);
+  for (auto _ : state) {
+    absl::base_internal::SpinLockHolder holder(spinlock.get());
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_KERNEL_ONLY)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_TryLock, absl::base_internal::SCHEDULE_KERNEL_ONLY)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_TryLock,
+                   absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+}  // namespace
diff --git a/third_party/absl/absl/base/internal/spinlock_linux.inc b/third_party/absl/absl/base/internal/spinlock_linux.inc
new file mode 100644
index 000000000..fe8ba674f
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock_linux.inc
@@ -0,0 +1,71 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/errno_saver.h"
+
+// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
+// `std::atomic<uint32_t>` is bitwise equivalent of the `int` expected
+// by SYS_futex. We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API. C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+              "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int,
+    absl::base_internal::SchedulingMode) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr);
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t> *w, bool all) {
+  syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
+}
+
+}  // extern "C"
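The futex protocol the file above relies on can be sketched in isolation; a minimal Linux-only demo (illustrative, with the same "sleep only if the word still holds the expected value" contract):

```cpp
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <atomic>
#include <cstdint>

void WaitWhileEquals(std::atomic<uint32_t>* word, uint32_t expected) {
  // The kernel re-checks *word == expected under its own lock, so a wake
  // racing with this call cannot be missed.
  syscall(SYS_futex, word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected, nullptr);
}

void WakeOne(std::atomic<uint32_t>* word) {
  syscall(SYS_futex, word, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
}
```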
diff --git a/third_party/absl/absl/base/internal/spinlock_posix.inc b/third_party/absl/absl/base/internal/spinlock_posix.inc
new file mode 100644
index 000000000..4f6f887d9
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+
+#include <atomic>
+#include <ctime>
+
+#include "absl/base/internal/errno_saver.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/third_party/absl/absl/base/internal/spinlock_wait.cc b/third_party/absl/absl/base/internal/spinlock_wait.cc
new file mode 100644
index 000000000..fa824be1c
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock_wait.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__linux__)
+#include "absl/base/internal/spinlock_linux.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      base_internal::SchedulingMode scheduling_mode) {
+  int loop = 0;
+  for (;;) {
+    uint32_t v = w->load(std::memory_order_acquire);
+    int i;
+    for (i = 0; i != n && v != trans[i].from; i++) {
+    }
+    if (i == n) {
+      SpinLockDelay(w, v, ++loop, scheduling_mode);  // no matching transition
+    } else if (trans[i].to == v ||  // null transition
+               w->compare_exchange_strong(v, trans[i].to,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+      if (trans[i].done) return v;
+    }
+  }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop"
+int SpinLockSuggestedDelayNS(int loop) {
+  // Weak pseudo-random number generator to get some spread between threads
+  // when many are spinning.
+  uint64_t r = delay_rand.load(std::memory_order_relaxed);
+  r = 0x5deece66dLL * r + 0xb;   // numbers from nrand48()
+  delay_rand.store(r, std::memory_order_relaxed);
+
+  if (loop < 0 || loop > 32) {   // limit loop to 0..32
+    loop = 32;
+  }
+  const int kMinDelay = 128 << 10;  // 128us
+  // Double delay every 8 iterations, up to 16x (2ms).
+  int delay = kMinDelay << (loop / 8);
+  // Randomize in delay..2*delay range, for resulting 128us..4ms range.
+  return delay | ((delay - 1) & static_cast<int>(r));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
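A minimal sketch of driving SpinLockWait() with a transition table, in the style of a three-state once-flag (the state encoding 0/1/2 is illustrative):

```cpp
#include <atomic>
#include <cstdint>

#include "absl/base/internal/spinlock_wait.h"

// States: 0 = unstarted, 1 = running, 2 = done.
bool BeginInit(std::atomic<uint32_t>* state) {
  static const absl::base_internal::SpinLockWaitTransition trans[] = {
      {0, 1, true},  // Unstarted -> running: we won; caller runs the init.
      {2, 2, true},  // Already done: "null transition", return immediately.
  };                 // While the state is 1 nothing matches, so we wait.
  uint32_t old = absl::base_internal::SpinLockWait(
      state, 2, trans, absl::base_internal::SCHEDULE_KERNEL_ONLY);
  // If we won (old == 0), the caller must later store 2 and call
  // SpinLockWake(state, true) so blocked callers can finish.
  return old == 0;
}
```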
diff --git a/third_party/absl/absl/base/internal/spinlock_wait.h b/third_party/absl/absl/base/internal/spinlock_wait.h
new file mode 100644
index 000000000..9a1adcda5
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock_wait.h
@@ -0,0 +1,95 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+#include <stdint.h>
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to".  It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+  uint32_t from;
+  uint32_t to;
+  bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition,
+// then return the old value of *w.  Make any other atomic transitions
+// where !trans[i].done, but continue waiting.
+//
+// Wakeups for threads blocked on SpinLockWait do not respect priorities.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      SchedulingMode scheduling_mode);
+
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a call to SpinLockWake(w).
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+                   base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker.  This causes it to flag weak symbol overrides as ODR
+// violations.  Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+                                                      bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+                                              bool all) {
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
+}
+
+inline void absl::base_internal::SpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode) {
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+  (w, value, loop, scheduling_mode);
+}
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/third_party/absl/absl/base/internal/spinlock_win32.inc b/third_party/absl/absl/base/internal/spinlock_win32.inc
new file mode 100644
index 000000000..934c2016f
--- /dev/null
+++ b/third_party/absl/absl/base/internal/spinlock_win32.inc
@@ -0,0 +1,40 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  if (loop == 0) {
+  } else if (loop == 1) {
+    Sleep(0);
+  } else {
+    // SpinLockSuggestedDelayNS() always returns a positive integer, so this
+    // static_cast is safe.
+    Sleep(static_cast<DWORD>(
+        absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000));
+  }
+}
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
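Because the delay/wake entry points above are weak extern "C" symbols, an embedder can supply strong definitions at link time; a hedged sketch of such an override (this illustrates the extension point, not a documented contract):

```cpp
#include <sched.h>

#include <atomic>
#include <cstdint>

#include "absl/base/config.h"
#include "absl/base/internal/scheduling_mode.h"

// A strong definition overrides the weak platform default: here every
// non-initial delay iteration simply yields the CPU.
extern "C" void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
    absl::base_internal::SchedulingMode /* mode */) {
  if (loop > 0) sched_yield();
}
```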
diff --git a/third_party/absl/absl/base/internal/strerror.cc b/third_party/absl/absl/base/internal/strerror.cc
new file mode 100644
index 000000000..de91c05e0
--- /dev/null
+++ b/third_party/absl/absl/base/internal/strerror.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <array>
+#include <cerrno>
+#include <cstddef>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/internal/errno_saver.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
+#if defined(_WIN32)
+  int rc = strerror_s(buf, buflen, errnum);
+  buf[buflen - 1] = '\0';  // guarantee NUL termination
+  if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
+  return buf;
+#else
+  // The type of `ret` is platform-specific; both of these branches must compile
+  // either way but only one will execute on any given platform:
+  auto ret = strerror_r(errnum, buf, buflen);
+  if (std::is_same<decltype(ret), int>::value) {
+    // XSI `strerror_r`; `ret` is `int`:
+    if (ret) *buf = '\0';
+    return buf;
+  } else {
+    // GNU `strerror_r`; `ret` is `char *`:
+    return reinterpret_cast<const char*>(ret);
+  }
+#endif
+}
+
+std::string StrErrorInternal(int errnum) {
+  char buf[100];
+  const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
+  if (*str == '\0') {
+    snprintf(buf, sizeof buf, "Unknown error %d", errnum);
+    str = buf;
+  }
+  return str;
+}
+
+// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
+// to `StrErrorAdaptor()` if the value is larger than this.
+constexpr int kSysNerr = 135;
+
+std::array<std::string, kSysNerr>* NewStrErrorTable() {
+  auto* table = new std::array<std::string, kSysNerr>;
+  for (size_t i = 0; i < table->size(); ++i) {
+    (*table)[i] = StrErrorInternal(static_cast<int>(i));
+  }
+  return table;
+}
+
+}  // namespace
+
+std::string StrError(int errnum) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  static const auto* table = NewStrErrorTable();
+  if (errnum >= 0 && static_cast<size_t>(errnum) < table->size()) {
+    return (*table)[static_cast<size_t>(errnum)];
+  }
+  return StrErrorInternal(errnum);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
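A minimal usage sketch (illustrative):

```cpp
#include <cerrno>
#include <cstdio>

#include "absl/base/internal/strerror.h"

void PrintOpenError(const char* path) {
  // Unlike strerror(), StrError() is thread-safe and leaves errno unchanged.
  std::fprintf(stderr, "open(%s) failed: %s\n", path,
               absl::base_internal::StrError(errno).c_str());
}
```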
diff --git a/third_party/absl/absl/base/internal/strerror.h b/third_party/absl/absl/base/internal/strerror.h
new file mode 100644
index 000000000..350097366
--- /dev/null
+++ b/third_party/absl/absl/base/internal/strerror.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A portable and thread-safe alternative to C89's `strerror`.
+//
+// The C89 specification of `strerror` is not suitable for use in a
+// multi-threaded application as the returned string may be changed by calls to
+// `strerror` from another thread. The many non-stdlib alternatives differ
+// enough in their names, availability, and semantics to justify this wrapper
+// around them. `errno` will not be modified by a call to `absl::StrError`.
+std::string StrError(int errnum);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_STRERROR_H_
diff --git a/third_party/absl/absl/base/internal/strerror_benchmark.cc b/third_party/absl/absl/base/internal/strerror_benchmark.cc
new file mode 100644
index 000000000..c9ab14a89
--- /dev/null
+++ b/third_party/absl/absl/base/internal/strerror_benchmark.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cerrno>
+#include <cstdio>
+#include <string>
+
+#include "absl/base/internal/strerror.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+void BM_AbslStrError(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE));
+  }
+}
+BENCHMARK(BM_AbslStrError);
+}  // namespace
diff --git a/third_party/absl/absl/base/internal/strerror_test.cc b/third_party/absl/absl/base/internal/strerror_test.cc
new file mode 100644
index 000000000..e32d5b5c9
--- /dev/null
+++ b/third_party/absl/absl/base/internal/strerror_test.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <atomic>
+#include <cerrno>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/match.h"
+
+namespace {
+using ::testing::AnyOf;
+using ::testing::Eq;
+
+TEST(StrErrorTest, ValidErrorCode) {
+  errno = ERANGE;
+  EXPECT_THAT(absl::base_internal::StrError(EDOM), Eq(strerror(EDOM)));
+  EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, InvalidErrorCode) {
+  errno = ERANGE;
+  EXPECT_THAT(absl::base_internal::StrError(-1),
+              AnyOf(Eq("No error information"), Eq("Unknown error -1")));
+  EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, MultipleThreads) {
+  // In this test, we will start up several threads and have each one call
+  // StrError 1000 times, each time with a different errnum.  We
+  // expect that StrError(errnum) will return a string equal to the
+  // one returned by strerror(errnum), if the code is known.  Since
+  // strerror is known to be thread-hostile, collect all the expected
+  // strings up front.
+  const int kNumCodes = 1000;
+  std::vector<std::string> expected_strings(kNumCodes);
+  for (int i = 0; i < kNumCodes; ++i) {
+    expected_strings[i] = strerror(i);
+  }
+
+  std::atomic_int counter(0);
+  auto thread_fun = [&]() {
+    for (int i = 0; i < kNumCodes; ++i) {
+      ++counter;
+      errno = ERANGE;
+      const std::string value = absl::base_internal::StrError(i);
+      // EXPECT_* could change errno. Stash it first.
+      int check_err = errno;
+      EXPECT_THAT(check_err, Eq(ERANGE));
+      // Only the GNU implementation is guaranteed to provide the
+      // string "Unknown error nnn". POSIX doesn't say anything.
+      if (!absl::StartsWith(value, "Unknown error ")) {
+        EXPECT_THAT(value, Eq(expected_strings[i]));
+      }
+    }
+  };
+
+  const int kNumThreads = 100;
+  std::vector<std::thread> threads;
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads.push_back(std::thread(thread_fun));
+  }
+  for (auto& thread : threads) {
+    thread.join();
+  }
+
+  EXPECT_THAT(counter, Eq(kNumThreads * kNumCodes));
+}
+
+}  // namespace
diff --git a/third_party/absl/absl/base/internal/sysinfo.cc b/third_party/absl/absl/base/internal/sysinfo.cc
new file mode 100644
index 000000000..79eaba3e5
--- /dev/null
+++ b/third_party/absl/absl/base/internal/sysinfo.cc
@@ -0,0 +1,489 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#include "absl/base/attributes.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
+
+#ifdef __FreeBSD__
+#include <pthread_np.h>
+#endif
+
+#ifdef __NetBSD__
+#include <lwp.h>
+#endif
+
+#if defined(__myriad2__)
+#include <rtems.h>
+#endif
+
+#include <string.h>
+
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <limits>
+#include <thread>  // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#if defined(_WIN32)
+
+// Returns number of bits set in `bitMask`
+DWORD Win32CountSetBits(ULONG_PTR bitMask) {
+  for (DWORD bitSetCount = 0; ; ++bitSetCount) {
+    if (bitMask == 0) return bitSetCount;
+    bitMask &= bitMask - 1;
+  }
+}
+
+// Returns the number of logical CPUs using GetLogicalProcessorInformation(), or
+// 0 if the number of processors is not available or can not be computed.
+// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
+int Win32NumCPUs() {
+#pragma comment(lib, "kernel32.lib")
+  using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+
+  DWORD info_size = sizeof(Info);
+  Info* info(static_cast<Info*>(malloc(info_size)));
+  if (info == nullptr) return 0;
+
+  bool success = GetLogicalProcessorInformation(info, &info_size);
+  if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
+    free(info);
+    info = static_cast<Info*>(malloc(info_size));
+    if (info == nullptr) return 0;
+    success = GetLogicalProcessorInformation(info, &info_size);
+  }
+
+  DWORD logicalProcessorCount = 0;
+  if (success) {
+    Info* ptr = info;
+    DWORD byteOffset = 0;
+    while (byteOffset + sizeof(Info) <= info_size) {
+      switch (ptr->Relationship) {
+        case RelationProcessorCore:
+          logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask);
+          break;
+
+        case RelationNumaNode:
+        case RelationCache:
+        case RelationProcessorPackage:
+          // Ignore other entries
+          break;
+
+        default:
+          // Ignore unknown entries
+          break;
+      }
+      byteOffset += sizeof(Info);
+      ptr++;
+    }
+  }
+  free(info);
+  return static_cast<int>(logicalProcessorCount);
+}
+
+#endif
+
+}  // namespace
+
+static int GetNumCPUs() {
+#if defined(__myriad2__)
+  return 1;
+#elif defined(_WIN32)
+  const int hardware_concurrency = Win32NumCPUs();
+  return hardware_concurrency ? hardware_concurrency : 1;
+#elif defined(_AIX)
+  return sysconf(_SC_NPROCESSORS_ONLN);
+#else
+  // Other possibilities:
+  //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
+  //  - sysconf(_SC_NPROCESSORS_ONLN)
+  return static_cast<int>(std::thread::hardware_concurrency());
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+    !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+  // UWP apps don't have access to the registry and currently don't provide an
+  // API informing about CPU nominal frequency.
+  return 1.0;
+#else
+#pragma comment(lib, "advapi32.lib")  // For Reg* functions.
+  HKEY key;
+  // Use the Reg* functions rather than the SH functions because shlwapi.dll
+  // pulls in gdi32.dll which makes process destruction much more costly.
+  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
+                    "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
+                    KEY_READ, &key) == ERROR_SUCCESS) {
+    DWORD type = 0;
+    DWORD data = 0;
+    DWORD data_size = sizeof(data);
+    auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
+                                   reinterpret_cast<LPBYTE>(&data), &data_size);
+    RegCloseKey(key);
+    if (result == ERROR_SUCCESS && type == REG_DWORD &&
+        data_size == sizeof(data)) {
+      return data * 1e6;  // Value is MHz.
+    }
+  }
+  return 1.0;
+#endif  // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+  unsigned freq;
+  size_t size = sizeof(freq);
+  int mib[2] = {CTL_HW, HW_CPU_FREQ};
+  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+    return static_cast<double>(freq);
+  }
+  return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+static bool ReadLongFromFile(const char *file, long *value) {
+  bool ret = false;
+#if defined(_POSIX_C_SOURCE)
+  const int file_mode = (O_RDONLY | O_CLOEXEC);
+#else
+  const int file_mode = O_RDONLY;
+#endif
+
+  int fd = open(file, file_mode);
+  if (fd != -1) {
+    char line[1024];
+    char *err;
+    memset(line, '\0', sizeof(line));
+    ssize_t len;
+    do {
+      len = read(fd, line, sizeof(line) - 1);
+    } while (len < 0 && errno == EINTR);
+    if (len <= 0) {
+      ret = false;
+    } else {
+      const long temp_value = strtol(line, &err, 10);
+      if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+        *value = temp_value;
+        ret = true;
+      }
+    }
+    close(fd);
+  }
+  return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() {
+  struct timespec t;
+#ifdef CLOCK_MONOTONIC_RAW
+  int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
+#else
+  int rc = clock_gettime(CLOCK_MONOTONIC, &t);
+#endif
+  if (rc != 0) {
+    ABSL_INTERNAL_LOG(
+        FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
+  }
+  return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
+}
+
+class UnscaledCycleClockWrapperForInitializeFrequency {
+ public:
+  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+
+struct TimeTscPair {
+  int64_t time;  // From ReadMonotonicClockNanos().
+  int64_t tsc;   // From UnscaledCycleClock::Now().
+};
+
+// Returns a pair of values (monotonic kernel time, TSC ticks) that
+// approximately correspond to each other. This is accomplished by
+// doing several reads and picking the reading with the lowest
+// latency. This approach is used to minimize the probability that
+// our thread was preempted between clock reads.
+static TimeTscPair GetTimeTscPair() {
+  int64_t best_latency = std::numeric_limits<int64_t>::max();
+  TimeTscPair best;
+  for (int i = 0; i < 10; ++i) {
+    int64_t t0 = ReadMonotonicClockNanos();
+    int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
+    int64_t t1 = ReadMonotonicClockNanos();
+    int64_t latency = t1 - t0;
+    if (latency < best_latency) {
+      best_latency = latency;
+      best.time = t0;
+      best.tsc = tsc;
+    }
+  }
+  return best;
+}
+
+// Measures and returns the TSC frequency by taking a pair of
+// measurements approximately `sleep_nanoseconds` apart.
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
+  auto t0 = GetTimeTscPair();
+  struct timespec ts;
+  ts.tv_sec = 0;
+  ts.tv_nsec = sleep_nanoseconds;
+  while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
+  auto t1 = GetTimeTscPair();
+  double elapsed_ticks = t1.tsc - t0.tsc;
+  double elapsed_time = (t1.time - t0.time) * 1e-9;
+  return elapsed_ticks / elapsed_time;
+}
+
+// Measures and returns the TSC frequency by calling
+// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
+// frequency measurement stabilizes.
+static double MeasureTscFrequency() {
+  double last_measurement = -1.0;
+  int sleep_nanoseconds = 1000000;  // 1 millisecond.
+  for (int i = 0; i < 8; ++i) {
+    double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
+    if (measurement * 0.99 < last_measurement &&
+        last_measurement < measurement * 1.01) {
+      // Use the current measurement if it is within 1% of the
+      // previous measurement.
+      return measurement;
+    }
+    last_measurement = measurement;
+    sleep_nanoseconds *= 2;
+  }
+  return last_measurement;
+}
+
+#endif  // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+
+static double GetNominalCPUFrequency() {
+  long freq = 0;
+
+  // Google's production kernel has a patch to export the TSC
+  // frequency through sysfs. If the kernel is exporting the TSC
+  // frequency use that. There are issues where cpuinfo_max_freq
+  // cannot be relied on because the BIOS may be exporting an invalid
+  // p-state (on x86) or p-states may be used to put the processor in
+  // a new mode (turbo mode). Essentially, those frequencies cannot
+  // always be relied upon. The same reasons apply to /proc/cpuinfo as
+  // well.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+  // On these platforms, the TSC frequency is the nominal CPU
+  // frequency. But without having the kernel export it directly
+  // through /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
+  // other way to reliably get the TSC frequency, so we have to
+  // measure it ourselves. Some CPUs abuse cpuinfo_max_freq by
+  // exporting "fake" frequencies for implementing new features. For
+  // example, Intel's turbo mode is enabled by exposing a p-state
+  // value with a higher frequency than that of the real TSC
+  // rate. Because of this, we prefer to measure the TSC rate
+  // ourselves on i386 and x86-64.
+  return MeasureTscFrequency();
+#else
+
+  // If CPU scaling is in effect, we want to use the *maximum*
+  // frequency, not whatever CPU speed some random processor happens
+  // to be using now.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+                       &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+  return 1.0;
+#endif  // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
+ABSL_CONST_INIT static once_flag init_num_cpus_once;
+ABSL_CONST_INIT static int num_cpus = 0;
+
+// NumCPUs() may be called before main() and before malloc is properly
+// initialized, therefore this must not allocate memory.
+int NumCPUs() {
+  base_internal::LowLevelCallOnce(
+      &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
+  return num_cpus;
+}
+
+// A default frequency of 0.0 might be dangerous if it is used in division.
+ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
+ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
+
+// NominalCPUFrequency() may be called before main() and before malloc is
+// properly initialized, therefore this must not allocate memory.
+double NominalCPUFrequency() {
+  base_internal::LowLevelCallOnce(
+      &init_nominal_cpu_frequency_once,
+      []() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
+  return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
+  return pid_t{GetCurrentThreadId()};
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+  return static_cast<pid_t>(syscall(SYS_gettid));
+}
+
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+  // Akaros has a concept of "vcore context", which is the state the program
+  // is forced into when we need to make a user-level scheduling decision, or
+  // run a signal handler. This is analogous to the interrupt context that a
+  // CPU might enter if it encounters some kind of exception.
+  //
+  // There is no current thread context in vcore context, but we need to give
+  // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+  // Thread 0 always exists, so if we are in vcore context, we return that.
+  //
+  // Otherwise, we know (since we are using pthreads) that the uthread struct
+  // current_uthread is pointing to is the first element of a
+  // struct pthread_tcb, so we extract and return the thread ID from that.
+  //
+  // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+  // structure at some point. We should modify this code to remove the cast
+  // when that happens.
+  if (in_vcore_context())
+    return 0;
+  return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
+#elif defined(__myriad2__)
+
+pid_t GetTID() {
+  uint32_t tid;
+  rtems_task_ident(RTEMS_SELF, 0, &tid);
+  return tid;
+}
+
+#elif defined(__APPLE__)
+
+pid_t GetTID() {
+  uint64_t tid;
+  // `nullptr` here implies this thread.  This only fails if the specified
+  // thread is invalid or the pointer-to-tid is null, so we needn't worry about
+  // it.
+  pthread_threadid_np(nullptr, &tid);
+  return static_cast<pid_t>(tid);
+}
+
+#elif defined(__FreeBSD__)
+
+pid_t GetTID() { return static_cast<pid_t>(pthread_getthreadid_np()); }
+
+#elif defined(__OpenBSD__)
+
+pid_t GetTID() { return getthrid(); }
+
+#elif defined(__NetBSD__)
+
+pid_t GetTID() { return static_cast<pid_t>(_lwp_self()); }
+
+#elif defined(__native_client__)
+
+pid_t GetTID() {
+  auto* thread = pthread_self();
+  static_assert(sizeof(pid_t) == sizeof(thread),
+                "In NaCL int expected to be the same size as a pointer");
+  return reinterpret_cast<pid_t>(thread);
+}
+
+#else
+
+// Fallback implementation of `GetTID` using `pthread_self`.
+pid_t GetTID() {
+  // `pthread_t` need not be arithmetic per POSIX; platforms where it isn't
+  // should be handled above.
+  return static_cast<pid_t>(pthread_self());
+}
+
+#endif
+
+// GetCachedTID() caches the thread ID in thread-local storage (which is a
+// userspace construct) to avoid unnecessary system calls. Without this caching,
+// it can take roughly 98ns, while it takes roughly 1ns with this caching.
+pid_t GetCachedTID() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+  static thread_local pid_t thread_id = GetTID();
+  return thread_id;
+#else
+  return GetTID();
+#endif  // ABSL_HAVE_THREAD_LOCAL
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
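A minimal sketch exercising the routines defined above (illustrative):

```cpp
#include <cstdio>

#include "absl/base/internal/sysinfo.h"

void LogHostInfo() {
  std::printf("cpus=%d nominal_hz=%.0f tid=%ld\n",
              absl::base_internal::NumCPUs(),
              absl::base_internal::NominalCPUFrequency(),
              static_cast<long>(absl::base_internal::GetCachedTID()));
}
```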
diff --git a/third_party/absl/absl/base/internal/sysinfo.h b/third_party/absl/absl/base/internal/sysinfo.h
new file mode 100644
index 000000000..119cf1f0e
--- /dev/null
+++ b/third_party/absl/absl/base/internal/sysinfo.h
@@ -0,0 +1,74 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on.  It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0.
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor.  This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h)
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused.  Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill().  However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process id and thread id are of the same type: the return types
+// of GetProcessId() and GetThreadId() are both DWORD, an unsigned 32-bit type.
+using pid_t = uint32_t;
+#endif
+pid_t GetTID();
+
+// Like GetTID(), but caches the result in thread-local storage in order
+// to avoid unnecessary system calls. Note that there are some cases where
+// one must call through to GetTID directly, which is why this exists as a
+// separate function. For example, GetCachedTID() is not safe to call in
+// an asynchronous signal-handling context nor right after a call to fork().
+pid_t GetCachedTID();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SYSINFO_H_
diff --git a/third_party/absl/absl/base/internal/sysinfo_test.cc b/third_party/absl/absl/base/internal/sysinfo_test.cc
new file mode 100644
index 000000000..f305b6c53
--- /dev/null
+++ b/third_party/absl/absl/base/internal/sysinfo_test.cc
@@ -0,0 +1,88 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_set>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/barrier.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+TEST(SysinfoTest, NumCPUs) {
+  EXPECT_NE(NumCPUs(), 0)
+      << "NumCPUs() should not have the default value of 0";
+}
+
+TEST(SysinfoTest, GetTID) {
+  EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
+#ifdef __native_client__
+  // Native Client has a race condition bug that leads to memory
+  // exhaustion when repeatedly creating and joining threads.
+TEST(SysinfoTest, GetTID) {
+  EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
+#ifdef __native_client__
+  // Native Client has a race condition bug that leads to memory
+  // exhaustion when repeatedly creating and joining threads.
+  // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
+  return;
+#endif
+  // Test that TIDs are unique to each thread.
+  // Uses a few loops to exercise implementations that reallocate IDs.
+  for (int i = 0; i < 10; ++i) {
+    constexpr int kNumThreads = 10;
+    Barrier all_threads_done(kNumThreads);
+    std::vector<std::thread> threads;
+
+    Mutex mutex;
+    std::unordered_set<pid_t> tids;
+
+    for (int j = 0; j < kNumThreads; ++j) {
+      threads.push_back(std::thread([&]() {
+        pid_t id = GetTID();
+        {
+          MutexLock lock(&mutex);
+          ASSERT_TRUE(tids.find(id) == tids.end());
+          tids.insert(id);
+        }
+        // We can't simply join the threads here. The threads need to
+        // be alive, otherwise the TID might have been reallocated to
+        // another live thread.
+        all_threads_done.Block();
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+#ifdef __linux__
+TEST(SysinfoTest, LinuxGetTID) {
+  // On Linux, for the main thread, GetTID() == getpid() is guaranteed by the
+  // API.
+  EXPECT_EQ(GetTID(), getpid());
+}
+#endif
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/internal/thread_identity.cc b/third_party/absl/absl/base/internal/thread_identity.cc
new file mode 100644
index 000000000..0471a25dc
--- /dev/null
+++ b/third_party/absl/absl/base/internal/thread_identity.cc
@@ -0,0 +1,163 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#if !defined(_WIN32) || defined(__MINGW32__)
+#include <pthread.h>
+#ifndef __wasi__
+// WASI does not provide this header; either way, we disable the use of
+// signals with it below.
+#include <signal.h>
+#endif
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to coordinate one-time creation of our pthread_key.
+absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+  pthread_key_create(&thread_identity_pthread_key, reclaimer);
+  pthread_key_initialized.store(true, std::memory_order_release);
+}
+}  // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple instances of Abseil code
+// exist within a process (via dlopen() or similar), references to
+// thread_identity_ptr from each instance of the code will refer to
+// *different* instances of this ptr.
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+ABSL_CONST_INIT  // Must come before __attribute__((visibility("protected")))
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+    __attribute__((visibility("protected")))
+#endif  // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+#if ABSL_PER_THREAD_TLS
+    // Prefer __thread to thread_local as benchmarks indicate it is a bit
+    // faster.
+    ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+    thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+#endif  // ABSL_PER_THREAD_TLS
+#endif  // TLS or CPP11
+
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer) {
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+  // Associate our destructor.
+  // NOTE: This call to pthread_setspecific is currently the only immovable
+  // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // NOTE: Not async-safe. But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+
+#if defined(__wasi__) || defined(__EMSCRIPTEN__) || defined(__MINGW32__) || \
+    defined(__hexagon__)
+  // The Emscripten, WASI, and MinGW pthread implementations do not support
+  // signals. See
+  // https://kripken.github.io/emscripten-site/docs/porting/pthreads.html for
+  // more information.
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+#else
+  // We must mask signals around the call to setspecific as with current glibc,
+  // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
+  // may zero our value.
+  //
+  // While not officially async-signal safe, getspecific within a signal
+  // handler is otherwise OK.
+  sigset_t all_signals;
+  sigset_t curr_signals;
+  sigfillset(&all_signals);
+  pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+#endif  // !__EMSCRIPTEN__ && !__MINGW32__
+
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+  // NOTE: Not async-safe. But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+      holder(identity, reclaimer);
+  thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+// Please see the comment on `CurrentThreadIdentityIfPresent` in
+// thread_identity.h. When we cannot expose thread_local variables in
+// headers, we opt for the correct-but-slower option of not inlining this
+// function.
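+
+// Illustrative example (editor's sketch, not upstream code): callers such as
+// the Mutex implementation use the fast TLS read when an identity already
+// exists and fall back to creating one otherwise. `MaybeUseIdentity` and
+// `UseExistingIdentity` are hypothetical names.
+#if 0  // Example only; not compiled.
+void MaybeUseIdentity() {
+  if (ThreadIdentity* id = CurrentThreadIdentityIfPresent()) {
+    UseExistingIdentity(id);  // Cheap thread-local read; no allocation.
+  } else {
+    // Slow path: create and register an identity, e.g. via
+    // GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+  }
+}
+#endif
+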
+#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  return thread_identity_ptr;
+}
+#endif
+#endif
+
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // pthread_setspecific is expected to clear the value on destruction.
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+  if (!initialized) {
+    return nullptr;
+  }
+  return reinterpret_cast<ThreadIdentity*>(
+      pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/internal/thread_identity.h b/third_party/absl/absl/base/internal/thread_identity.h
new file mode 100644
index 000000000..b6e917ce8
--- /dev/null
+++ b/third_party/absl/absl/base/internal/thread_identity.h
@@ -0,0 +1,269 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// interfaces at various levels. ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of absl::Mutex and absl::CondVar.
+struct PerThreadSynch {
+  // The internal representation of absl::Mutex and absl::CondVar relies
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
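+
+  // Illustrative check (editor's addition, implied by the constants above):
+  // kLowZeroBits == 8 means every PerThreadSynch must live at a 256-byte
+  // aligned address, leaving the low 8 bits of its pointer free for the
+  // packed state described in the comment above.
+  static_assert(kAlignment == 256,
+                "1 << kLowZeroBits must free the low 8 address bits");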
+
+  PerThreadSynch* next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch* skip;  // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have same
+                         // condition as this, and will be woken later
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator. Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+  bool wake;             // This thread is to be woken from a Mutex.
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake. Used for an optimization
+                         // in Enqueue(). true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully
+                         // releases the lock. It may not be set to false
+                         // by a reader that decrements the count to
+                         // non-zero. Protected by the Mutex spinlock.
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face
+                               // of broken invariants. This is used within
+                               // fatal signal handlers to improve the
+                               // chances of debug logging information being
+                               // output successfully.
+  int priority;  // Priority of thread (updated every so often).
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
+  //            Mutex or CondVar wait list.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State { kAvailable, kQueued };
+  std::atomic<State> state;
+
+  // The wait parameters of the current wait. waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch. A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams* waitp;
+
+  intptr_t readers;  // Number of readers in mutex.
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld* all_locks;
+};
+
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
+//
+// NOTE: The layout of fields in this structure is critical, please do not
+// add, remove, or modify the field placements without fully auditing the
+// layout.
+struct ThreadIdentity {
+  // Must be the first member. The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned. We provide this alignment on
+  // ThreadIdentity itself.
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    alignas(void*) char data[256];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself. The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread;
+// guaranteed to be unique for its lifetime. The returned object will remain
+// valid for the program's lifetime, although it may be re-assigned to a
+// subsequent thread. If one does not exist, returns nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already. Note that
+// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
+// on Apple platforms. Whatever function is calling your MallocHooks will need
+// to watch for recursion on Apple platforms.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value. 'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and
+// only from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
+// index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32) && !defined(__MINGW32__)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4. It's not
+// present in the upstream eglibc.
+// Note: Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+#if ABSL_PER_THREAD_TLS
+ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
+    thread_identity_ptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
+#else
+#error Thread-local storage not detected on this platform
+#endif
+
+// thread_local variables cannot be in headers exposed by DLLs or in certain
+// build configurations on Apple platforms. However, it is important for
+// performance reasons in general that `CurrentThreadIdentityIfPresent` be
+// inlined. In the other cases we opt to have the function not be inlined. Note
+// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
+// this entire inline definition.
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
+    !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
+#endif
+
+#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  return thread_identity_ptr;
+}
+#endif
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
diff --git a/third_party/absl/absl/base/internal/thread_identity_benchmark.cc b/third_party/absl/absl/base/internal/thread_identity_benchmark.cc
new file mode 100644
index 000000000..0ae10f2b1
--- /dev/null
+++ b/third_party/absl/absl/base/internal/thread_identity_benchmark.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "benchmark/benchmark.h" +#include "absl/base/internal/thread_identity.h" +#include "absl/synchronization/internal/create_thread_identity.h" +#include "absl/synchronization/internal/per_thread_sem.h" + +namespace { + +void BM_SafeCurrentThreadIdentity(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize( + absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()); + } +} +BENCHMARK(BM_SafeCurrentThreadIdentity); + +void BM_UnsafeCurrentThreadIdentity(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize( + absl::base_internal::CurrentThreadIdentityIfPresent()); + } +} +BENCHMARK(BM_UnsafeCurrentThreadIdentity); + +} // namespace diff --git a/third_party/absl/absl/base/internal/thread_identity_test.cc b/third_party/absl/absl/base/internal/thread_identity_test.cc new file mode 100644 index 000000000..5f17553e6 --- /dev/null +++ b/third_party/absl/absl/base/internal/thread_identity_test.cc @@ -0,0 +1,129 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/internal/thread_identity.h" + +#include // NOLINT(build/c++11) +#include + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/macros.h" +#include "absl/base/thread_annotations.h" +#include "absl/synchronization/internal/per_thread_sem.h" +#include "absl/synchronization/mutex.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock( + absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); +ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock); + +static const void* const kCheckNoIdentity = reinterpret_cast(1); + +static void TestThreadIdentityCurrent(const void* assert_no_identity) { + ThreadIdentity* identity; + + // We have to test this conditionally, because if the test framework relies + // on Abseil, then some previous action may have already allocated an + // identity. + if (assert_no_identity == kCheckNoIdentity) { + identity = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == nullptr); + } + + identity = synchronization_internal::GetOrCreateCurrentThreadIdentity(); + EXPECT_TRUE(identity != nullptr); + ThreadIdentity* identity_no_init; + identity_no_init = CurrentThreadIdentityIfPresent(); + EXPECT_TRUE(identity == identity_no_init); + + // Check that per_thread_synch is correctly aligned. + EXPECT_EQ(0, reinterpret_cast(&identity->per_thread_synch) % + PerThreadSynch::kAlignment); + EXPECT_EQ(identity, identity->per_thread_synch.thread_identity()); + + absl::base_internal::SpinLockHolder l(&map_lock); + num_identities_reused++; +} + +TEST(ThreadIdentityTest, BasicIdentityWorks) { + // This tests for the main() thread. 
+  TestThreadIdentityCurrent(nullptr);
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
+  // Now try the same basic test with multiple threads being created and
+  // destroyed. This makes sure that:
+  // - New threads are created without a ThreadIdentity.
+  // - We re-allocate ThreadIdentity objects from the free-list.
+  // - If a thread implementation chooses to recycle threads, the
+  //   correct re-initialization occurs.
+  static const int kNumLoops = 3;
+  static const int kNumThreads = 32;
+  for (int iter = 0; iter < kNumLoops; iter++) {
+    std::vector<std::thread> threads;
+    for (int i = 0; i < kNumThreads; ++i) {
+      threads.push_back(
+          std::thread(TestThreadIdentityCurrent, kCheckNoIdentity));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+
+  // We should have recycled ThreadIdentity objects above; while (external)
+  // library threads allocating their own identities may preclude some
+  // reuse, we should have sufficient repetitions to exclude this.
+  absl::base_internal::SpinLockHolder l(&map_lock);
+  EXPECT_LT(kNumThreads, num_identities_reused);
+}
+
+TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
+  // This test repeatedly creates and joins a series of threads, each of
+  // which acquires and releases shared Mutex locks. This verifies that
+  // Mutex operations work correctly under a reused ThreadIdentity. Note
+  // that the most likely failure mode of this test is a crash or deadlock.
+  static const int kNumLoops = 10;
+  static const int kNumThreads = 12;
+  static const int kNumMutexes = 3;
+  static const int kNumLockLoops = 5;
+
+  Mutex mutexes[kNumMutexes];
+  for (int iter = 0; iter < kNumLoops; ++iter) {
+    std::vector<std::thread> threads;
+    for (int thread = 0; thread < kNumThreads; ++thread) {
+      threads.push_back(std::thread([&]() {
+        for (int l = 0; l < kNumLockLoops; ++l) {
+          for (int m = 0; m < kNumMutexes; ++m) {
+            MutexLock lock(&mutexes[m]);
+          }
+        }
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/internal/throw_delegate.cc b/third_party/absl/absl/base/internal/throw_delegate.cc
new file mode 100644
index 000000000..337e870cd
--- /dev/null
+++ b/third_party/absl/absl/base/internal/throw_delegate.cc
@@ -0,0 +1,203 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <cstdlib>
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// NOTE: The exception types, like `std::logic_error`, do not exist on all
+// platforms. (For example, the Android NDK does not have them.)
+// Therefore, their use must be guarded by `#ifdef` or equivalent.
+
+void ThrowStdLogicError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::logic_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdLogicError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::logic_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+void ThrowStdInvalidArgument(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::invalid_argument(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdInvalidArgument(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::invalid_argument(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdDomainError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::domain_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdDomainError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::domain_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdLengthError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::length_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdLengthError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::length_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdOutOfRange(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::out_of_range(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdOutOfRange(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::out_of_range(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdRuntimeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::runtime_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdRuntimeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::runtime_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdRangeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::range_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdRangeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::range_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdOverflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::overflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdOverflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::overflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdUnderflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::underflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdUnderflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::underflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdBadFunctionCall() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::bad_function_call();
+#else
+  std::abort();
+#endif
+}
+
+void ThrowStdBadAlloc() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::bad_alloc();
+#else
+  std::abort();
+#endif
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/internal/throw_delegate.h b/third_party/absl/absl/base/internal/throw_delegate.h
new file mode 100644
index 000000000..075f52725
--- /dev/null
+++ b/third_party/absl/absl/base/internal/throw_delegate.h
@@ -0,0 +1,75 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Helper functions that allow throwing exceptions consistently from anywhere.
+// The main use case is for header-based libraries (e.g. templates), as they
+// will be built by many different targets with their own compiler options.
+// In particular, this will allow a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions. This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use it unless you're writing code that you know will be built
+// both with and without exceptions and you need to conform to an interface
+// that uses exceptions.
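+
+// Illustrative example (editor's sketch, not part of Abseil): a header-only
+// container might use these delegates for its at()-style accessor.
+// `CheckedGet` is a hypothetical helper.
+#if 0  // Example only; not compiled.
+template <typename T>
+const T& CheckedGet(const T* data, size_t size, size_t i) {
+  // Throws std::out_of_range when exceptions are enabled; logs fatally and
+  // aborts when the translation unit is built with -fno-exceptions.
+  if (i >= size) ThrowStdOutOfRange("index out of range");
+  return data[i];
+}
+#endif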
+
+[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
diff --git a/third_party/absl/absl/base/internal/tsan_mutex_interface.h b/third_party/absl/absl/base/internal/tsan_mutex_interface.h
new file mode 100644
index 000000000..39207d8a5
--- /dev/null
+++ b/third_party/absl/absl/base/internal/tsan_mutex_interface.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for the meaning of these annotations.
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
+#include "absl/base/config.h"
+
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
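+
+// Illustrative example (editor's sketch, not part of Abseil): a custom mutex
+// would wrap its operations in the annotation macros defined just below.
+// `MyMutex` and its lock plumbing are hypothetical.
+#if 0  // Example only; not compiled.
+class MyMutex {
+ public:
+  MyMutex() { ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); }
+  ~MyMutex() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+  void Lock() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    // ... acquire the underlying lock ...
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+  void Unlock() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    // ... release the underlying lock ...
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+};
+#endif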
+
+#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
diff --git a/third_party/absl/absl/base/internal/unaligned_access.h b/third_party/absl/absl/base/internal/unaligned_access.h
new file mode 100644
index 000000000..4fea45749
--- /dev/null
+++ b/third_party/absl/absl/base/internal/unaligned_access.h
@@ -0,0 +1,89 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/nullability.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+
+// The unaligned API is C++ only. The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
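+
+// Illustrative example (editor's sketch, not part of Abseil): the memcpy
+// idiom used below avoids the undefined behavior of dereferencing a
+// misaligned or type-punned pointer; compilers lower the fixed-size memcpy
+// to a plain load on targets that support unaligned access. `ParseLength`
+// is a hypothetical helper.
+#if 0  // Example only; not compiled.
+uint32_t ParseLength(const char* wire_bytes) {
+  // Safe even if wire_bytes is not 4-byte aligned:
+  return absl::base_internal::UnalignedLoad32(wire_bytes);
+  // By contrast, *reinterpret_cast<const uint32_t*>(wire_bytes) would be
+  // undefined behavior on misaligned input.
+}
+#endif
+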
+#if defined(__cplusplus)
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline uint16_t UnalignedLoad16(absl::Nonnull<const void*> p) {
+  uint16_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32_t UnalignedLoad32(absl::Nonnull<const void*> p) {
+  uint32_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64_t UnalignedLoad64(absl::Nonnull<const void*> p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore16(absl::Nonnull<void*> p, uint16_t v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void UnalignedStore32(absl::Nonnull<void*> p, uint32_t v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void UnalignedStore64(absl::Nonnull<void*> p, uint64_t v) {
+  memcpy(p, &v, sizeof v);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (absl::base_internal::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (absl::base_internal::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (absl::base_internal::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::base_internal::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::base_internal::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::base_internal::UnalignedStore64(_p, _val))
+
+#endif  // defined(__cplusplus), end of unaligned API
+
+#endif  // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
diff --git a/third_party/absl/absl/base/internal/unique_small_name_test.cc b/third_party/absl/absl/base/internal/unique_small_name_test.cc
new file mode 100644
index 000000000..ff8c2b3fb
--- /dev/null
+++ b/third_party/absl/absl/base/internal/unique_small_name_test.cc
@@ -0,0 +1,77 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gtest/gtest.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/string_view.h"
+
+// This test by itself does not do anything fancy, but it serves as a binary
+// that can be queried in a shell test.
+
+namespace {
+
+template <typename T>
+void DoNotOptimize(const T& var) {
+#ifdef __GNUC__
+  asm volatile("" : "+m"(const_cast<T&>(var)));
+#else
+  std::cout << (void*)&var;
+#endif
+}
+
+int very_long_int_variable_name ABSL_INTERNAL_UNIQUE_SMALL_NAME() = 0;
+char very_long_str_variable_name[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = "abc";
+
+TEST(UniqueSmallName, NonAutomaticVar) {
+  EXPECT_EQ(very_long_int_variable_name, 0);
+  EXPECT_EQ(absl::string_view(very_long_str_variable_name), "abc");
+}
+
+int VeryLongFreeFunctionName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+TEST(UniqueSmallName, FreeFunction) {
+  DoNotOptimize(&VeryLongFreeFunctionName);
+
+  EXPECT_EQ(VeryLongFreeFunctionName(), 456);
+}
+
+int VeryLongFreeFunctionName() { return 456; }
+
+struct VeryLongStructName {
+  explicit VeryLongStructName(int i);
+
+  int VeryLongMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+  static int VeryLongStaticMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+ private:
+  int fld;
+};
+
+TEST(UniqueSmallName, Struct) {
+  VeryLongStructName var(10);
+
+  DoNotOptimize(var);
+  DoNotOptimize(&VeryLongStructName::VeryLongMethodName);
+  DoNotOptimize(&VeryLongStructName::VeryLongStaticMethodName);
+
+  EXPECT_EQ(var.VeryLongMethodName(), 10);
+  EXPECT_EQ(VeryLongStructName::VeryLongStaticMethodName(), 123);
+}
+
+VeryLongStructName::VeryLongStructName(int i) : fld(i) {}
+int VeryLongStructName::VeryLongMethodName() { return fld; }
+int VeryLongStructName::VeryLongStaticMethodName() { return 123; }
+
+}  // namespace
diff --git a/third_party/absl/absl/base/internal/unscaledcycleclock.cc b/third_party/absl/absl/base/internal/unscaledcycleclock.cc
new file mode 100644
index 000000000..05e0e7ba5
--- /dev/null
+++ b/third_party/absl/absl/base/internal/unscaledcycleclock.cc
@@ -0,0 +1,152 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#if defined(__powerpc__) || defined(__ppc__)
+#ifdef __GLIBC__
+#include <sys/platform/ppc.h>
+#elif defined(__FreeBSD__)
+// clang-format off
+// This order does actually matter =(.
+#include <sys/types.h>
+#include <sys/sysctl.h>
+// clang-format on
+
+#include "absl/base/call_once.h"
+#endif
+#endif
+
+#include "absl/base/internal/sysinfo.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if defined(__i386__)
+
+int64_t UnscaledCycleClock::Now() {
+  int64_t ret;
+  __asm__ volatile("rdtsc" : "=A"(ret));
+  return ret;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__x86_64__)
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__powerpc__) || defined(__ppc__)
+
+int64_t UnscaledCycleClock::Now() {
+#ifdef __GLIBC__
+  return __ppc_get_timebase();
+#else
+#ifdef __powerpc64__
+  int64_t tbr;
+  asm volatile("mfspr %0, 268" : "=r"(tbr));
+  return tbr;
+#else
+  int32_t tbu, tbl, tmp;
+  asm volatile(
+      "mftbu %[hi32]\n"
+      "mftb %[lo32]\n"
+      "mftbu %[tmp]\n"
+      "cmpw %[tmp],%[hi32]\n"
+      "bne $-16\n"  // Retry on failure.
+      : [hi32] "=r"(tbu), [lo32] "=r"(tbl), [tmp] "=r"(tmp));
+  return (static_cast<int64_t>(tbu) << 32) | tbl;
+#endif
+#endif
+}
+
+double UnscaledCycleClock::Frequency() {
+#ifdef __GLIBC__
+  return __ppc_get_timebase_freq();
+#elif defined(_AIX)
+  // This is the same constant value as returned by
+  // __ppc_get_timebase_freq().
+  return static_cast<double>(512000000);
+#elif defined(__FreeBSD__)
+  static once_flag init_timebase_frequency_once;
+  static double timebase_frequency = 0.0;
+  base_internal::LowLevelCallOnce(&init_timebase_frequency_once, [&]() {
+    size_t length = sizeof(timebase_frequency);
+    sysctlbyname("kern.timecounter.tc.timebase.frequency", &timebase_frequency,
+                 &length, nullptr, 0);
+  });
+  return timebase_frequency;
+#else
+#error Must implement UnscaledCycleClock::Frequency()
+#endif
+}
+
+#elif defined(__aarch64__)
+
+// The system timer of ARMv8 runs at a different frequency than the CPU's.
+// The frequency is fixed, typically in the range 1-50MHz. It can be
+// read at CNTFRQ special register. We assume the OS has set up
+// the virtual timer properly.
+int64_t UnscaledCycleClock::Now() {
+  int64_t virtual_timer_value;
+  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+  return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+  uint64_t aarch64_timer_frequency;
+  asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
+  return aarch64_timer_frequency;
+}
+
+#elif defined(__riscv)
+
+int64_t UnscaledCycleClock::Now() {
+  int64_t virtual_timer_value;
+  asm volatile("rdcycle %0" : "=r"(virtual_timer_value));
+  return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(_M_IX86) || defined(_M_X64)
+
+#pragma intrinsic(__rdtsc)
+
+int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
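+
+// Illustrative example (editor's sketch, not part of Abseil): a raw cycle
+// count is converted to elapsed time by dividing by Frequency(), which is in
+// effect what the public CycleClock wrapper does. `ElapsedSeconds` is a
+// hypothetical helper.
+#if 0  // Example only; not compiled (UnscaledCycleClock is internal-only).
+double ElapsedSeconds(int64_t start_cycles, int64_t end_cycles) {
+  return static_cast<double>(end_cycles - start_cycles) /
+         absl::base_internal::UnscaledCycleClock::Frequency();
+}
+#endif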
diff --git a/third_party/absl/absl/base/internal/unscaledcycleclock.h b/third_party/absl/absl/base/internal/unscaledcycleclock.h
new file mode 100644
index 000000000..cc1276ba0
--- /dev/null
+++ b/third_party/absl/absl/base/internal/unscaledcycleclock.h
@@ -0,0 +1,96 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+//    An UnscaledCycleClock yields the value and frequency of a cycle counter
+//    that increments at a rate that is approximately constant.
+//    This class is for internal use only; you should consider using
+//    CycleClock instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock_config.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Return the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Return how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a
+  // measured value.
+  static double Frequency();
+
+  // Allowed users
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+#if defined(__x86_64__)
+
+inline int64_t UnscaledCycleClock::Now() {
+  uint64_t low, high;
+  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+  return static_cast<int64_t>((high << 32) | low);
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
diff --git a/third_party/absl/absl/base/internal/unscaledcycleclock_config.h b/third_party/absl/absl/base/internal/unscaledcycleclock_config.h
new file mode 100644
index 000000000..24b324ac9
--- /dev/null
+++ b/third_party/absl/absl/base/internal/unscaledcycleclock_config.h
@@ -0,0 +1,62 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+    defined(__powerpc__) || defined(__ppc__) || defined(__riscv) ||     \
+    defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || (defined(__APPLE__)) || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
+     defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
diff --git a/third_party/absl/absl/base/invoke_test.cc b/third_party/absl/absl/base/invoke_test.cc
new file mode 100644
index 000000000..7be26f649
--- /dev/null
+++ b/third_party/absl/absl/base/invoke_test.cc
@@ -0,0 +1,331 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
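+
+// Illustrative example (editor's sketch, not part of this test file):
+// base_internal::invoke mirrors C++17 std::invoke, funneling free functions,
+// functors, member functions, and data members through one call syntax.
+// `Point` and `Demo` are hypothetical.
+#if 0  // Example only; not compiled.
+struct Point {
+  int x;
+  int Twice() const { return 2 * x; }
+};
+void Demo() {
+  Point p{3};
+  int a = absl::base_internal::invoke(&Point::Twice, p);  // member function
+  int b = absl::base_internal::invoke(&Point::x, p);      // data member
+  (void)a;  // a == 6
+  (void)b;  // b == 3
+}
+#endif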
+ +#include "absl/base/internal/invoke.h" + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/memory/memory.h" +#include "absl/strings/str_cat.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { +namespace { + +int Function(int a, int b) { return a - b; } + +void VoidFunction(int& a, int& b) { + a += b; + b = a - b; + a -= b; +} + +int ZeroArgFunction() { return -1937; } + +int Sink(std::unique_ptr p) { + return *p; +} + +std::unique_ptr Factory(int n) { + return make_unique(n); +} + +void NoOp() {} + +struct ConstFunctor { + int operator()(int a, int b) const { return a - b; } +}; + +struct MutableFunctor { + int operator()(int a, int b) { return a - b; } +}; + +struct EphemeralFunctor { + int operator()(int a, int b) && { return a - b; } +}; + +struct OverloadedFunctor { + template + std::string operator()(const Args&... args) & { + return StrCat("&", args...); + } + template + std::string operator()(const Args&... args) const& { + return StrCat("const&", args...); + } + template + std::string operator()(const Args&... args) && { + return StrCat("&&", args...); + } +}; + +struct Class { + int Method(int a, int b) { return a - b; } + int ConstMethod(int a, int b) const { return a - b; } + int RefMethod(int a, int b) & { return a - b; } + int RefRefMethod(int a, int b) && { return a - b; } + int NoExceptMethod(int a, int b) noexcept { return a - b; } + int VolatileMethod(int a, int b) volatile { return a - b; } + + int member; +}; + +struct FlipFlop { + int ConstMethod() const { return member; } + FlipFlop operator*() const { return {-member}; } + + int member; +}; + +// CallMaybeWithArg(f) resolves either to invoke(f) or invoke(f, 42), depending +// on which one is valid. 
+template <typename F>
+decltype(base_internal::invoke(std::declval<const F&>())) CallMaybeWithArg(
+    const F& f) {
+  return base_internal::invoke(f);
+}
+
+template <typename F>
+decltype(base_internal::invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(
+    const F& f) {
+  return base_internal::invoke(f, 42);
+}
+
+TEST(InvokeTest, Function) {
+  EXPECT_EQ(1, base_internal::invoke(Function, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Function, 3, 2));
+}
+
+TEST(InvokeTest, NonCopyableArgument) {
+  EXPECT_EQ(42, base_internal::invoke(Sink, make_unique<int>(42)));
+}
+
+TEST(InvokeTest, NonCopyableResult) {
+  EXPECT_THAT(base_internal::invoke(Factory, 42), ::testing::Pointee(42));
+}
+
+TEST(InvokeTest, VoidResult) { base_internal::invoke(NoOp); }
+
+TEST(InvokeTest, ConstFunctor) {
+  EXPECT_EQ(1, base_internal::invoke(ConstFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, MutableFunctor) {
+  MutableFunctor f;
+  EXPECT_EQ(1, base_internal::invoke(f, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(MutableFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, EphemeralFunctor) {
+  EphemeralFunctor f;
+  EXPECT_EQ(1, base_internal::invoke(std::move(f), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(EphemeralFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, OverloadedFunctor) {
+  OverloadedFunctor f;
+  const OverloadedFunctor& cf = f;
+
+  EXPECT_EQ("&", base_internal::invoke(f));
+  EXPECT_EQ("& 42", base_internal::invoke(f, " 42"));
+
+  EXPECT_EQ("const&", base_internal::invoke(cf));
+  EXPECT_EQ("const& 42", base_internal::invoke(cf, " 42"));
+
+  EXPECT_EQ("&&", base_internal::invoke(std::move(f)));
+
+  OverloadedFunctor f2;
+  EXPECT_EQ("&& 42", base_internal::invoke(std::move(f2), " 42"));
+}
+
+TEST(InvokeTest, ReferenceWrapper) {
+  ConstFunctor cf;
+  MutableFunctor mf;
+  EXPECT_EQ(1, base_internal::invoke(std::cref(cf), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::ref(cf), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::ref(mf), 3, 2));
+}
+
+TEST(InvokeTest, MemberFunction) {
+  std::unique_ptr<Class> p(new Class);
+  std::unique_ptr<const Class> cp(new Class);
+  std::unique_ptr<volatile Class> vp(new Class);
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefRefMethod, std::move(*p), 3,
+                                     2));  // NOLINT
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, *p, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *p, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *cp, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp, 3, 2));
+  EXPECT_EQ(1,
+            base_internal::invoke(&Class::VolatileMethod, vp.get(), 3, 2));
vp.get(), 3, 2)); + EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *vp, 3, 2)); + + EXPECT_EQ(1, + base_internal::invoke(&Class::Method, make_unique(), 3, 2)); + EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, make_unique(), + 3, 2)); + EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, + make_unique(), 3, 2)); +} + +TEST(InvokeTest, DataMember) { + std::unique_ptr p(new Class{42}); + std::unique_ptr cp(new Class{42}); + EXPECT_EQ(42, base_internal::invoke(&Class::member, p)); + EXPECT_EQ(42, base_internal::invoke(&Class::member, *p)); + EXPECT_EQ(42, base_internal::invoke(&Class::member, p.get())); + + base_internal::invoke(&Class::member, p) = 42; + base_internal::invoke(&Class::member, p.get()) = 42; + + EXPECT_EQ(42, base_internal::invoke(&Class::member, cp)); + EXPECT_EQ(42, base_internal::invoke(&Class::member, *cp)); + EXPECT_EQ(42, base_internal::invoke(&Class::member, cp.get())); +} + +TEST(InvokeTest, FlipFlop) { + FlipFlop obj = {42}; + // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or + // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former. + EXPECT_EQ(42, base_internal::invoke(&FlipFlop::ConstMethod, obj)); + EXPECT_EQ(42, base_internal::invoke(&FlipFlop::member, obj)); +} + +TEST(InvokeTest, SfinaeFriendly) { + CallMaybeWithArg(NoOp); + EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42)); +} + +TEST(IsInvocableRTest, CallableExactMatch) { + static_assert( + base_internal::is_invocable_r::value, + "Should be true for exact match of types on a free function"); +} + +TEST(IsInvocableRTest, CallableArgumentConversionMatch) { + static_assert( + base_internal::is_invocable_r::value, + "Should be true for convertible argument type"); +} + +TEST(IsInvocableRTest, CallableReturnConversionMatch) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for convertible return type"); +} + +TEST(IsInvocableRTest, CallableReturnVoid) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for void expected and actual return types"); + static_assert( + base_internal::is_invocable_r::value, + "Should be true for void expected and non-void actual return types"); +} + +TEST(IsInvocableRTest, CallableRefQualifierMismatch) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for reference constness mismatch"); + static_assert(!base_internal::is_invocable_r::value, + "Should be false for reference value category mismatch"); +} + +TEST(IsInvocableRTest, CallableArgumentTypeMismatch) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for argument type mismatch"); +} + +TEST(IsInvocableRTest, CallableReturnTypeMismatch) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for return type mismatch"); +} + +TEST(IsInvocableRTest, CallableTooFewArgs) { + static_assert( + !base_internal::is_invocable_r::value, + "Should be false for too few arguments"); +} + +TEST(IsInvocableRTest, CallableTooManyArgs) { + static_assert(!base_internal::is_invocable_r::value, + "Should be false for too many arguments"); +} + +TEST(IsInvocableRTest, MemberFunctionAndReference) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a member function " + "and class reference"); +} + +TEST(IsInvocableRTest, MemberFunctionAndPointer) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a member function " + "and class pointer"); +} + +TEST(IsInvocableRTest, 
DataMemberAndReference) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a data member and " + "class reference"); +} + +TEST(IsInvocableRTest, DataMemberAndPointer) { + static_assert(base_internal::is_invocable_r::value, + "Should be true for exact match of types on a data member and " + "class pointer"); +} + +TEST(IsInvocableRTest, CallableZeroArgs) { + static_assert( + base_internal::is_invocable_r::value, + "Should be true for exact match for a zero-arg free function"); +} + +} // namespace +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/base/log_severity.cc b/third_party/absl/absl/base/log_severity.cc new file mode 100644 index 000000000..8e7bbbc9e --- /dev/null +++ b/third_party/absl/absl/base/log_severity.cc @@ -0,0 +1,56 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/log_severity.h" + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) { + if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s); + return os << "absl::LogSeverity(" << static_cast(s) << ")"; +} + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) { + switch (s) { + case absl::LogSeverityAtLeast::kInfo: + case absl::LogSeverityAtLeast::kWarning: + case absl::LogSeverityAtLeast::kError: + case absl::LogSeverityAtLeast::kFatal: + return os << ">=" << static_cast(s); + case absl::LogSeverityAtLeast::kInfinity: + return os << "INFINITY"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) { + switch (s) { + case absl::LogSeverityAtMost::kInfo: + case absl::LogSeverityAtMost::kWarning: + case absl::LogSeverityAtMost::kError: + case absl::LogSeverityAtMost::kFatal: + return os << "<=" << static_cast(s); + case absl::LogSeverityAtMost::kNegativeInfinity: + return os << "NEGATIVE_INFINITY"; + } + return os; +} +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/base/log_severity.h b/third_party/absl/absl/base/log_severity.h new file mode 100644 index 000000000..de9702ab6 --- /dev/null +++ b/third_party/absl/absl/base/log_severity.h @@ -0,0 +1,185 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
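A minimal sketch, not part of the vendored patch, of the streaming behavior the operators in log_severity.cc above implement (the expected outputs in the comments mirror the expectations in log_severity_test.cc below):

#include <iostream>

#include "absl/base/log_severity.h"

int main() {
  // Standard levels stream as their all-caps names.
  std::cout << absl::LogSeverity::kWarning << "\n";                 // WARNING
  // Values outside the four defined levels fall back to a numeric form.
  std::cout << static_cast<absl::LogSeverity>(4) << "\n";  // absl::LogSeverity(4)
  // Threshold enums prepend the comparison they represent.
  std::cout << absl::LogSeverityAtLeast::kError << "\n";            // >=ERROR
  std::cout << absl::LogSeverityAtMost::kNegativeInfinity << "\n";  // NEGATIVE_INFINITY
  return 0;
}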
+
+#ifndef ABSL_BASE_LOG_SEVERITY_H_
+#define ABSL_BASE_LOG_SEVERITY_H_
+
+#include <array>
+#include <ostream>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::LogSeverity
+//
+// Four severity levels are defined. Logging APIs should terminate the program
+// when a message is logged at severity `kFatal`; the other levels have no
+// special semantics.
+//
+// Values other than the four defined levels (e.g. produced by `static_cast`)
+// are valid, but their semantics when passed to a function, macro, or flag
+// depend on the function, macro, or flag. The usual behavior is to normalize
+// such values to a defined severity level, however in some cases values other
+// than the defined levels are useful for comparison.
+//
+// Example:
+//
+//   // Effectively disables all logging:
+//   SetMinLogLevel(static_cast<absl::LogSeverity>(100));
+//
+// Abseil flags may be defined with type `LogSeverity`. Dependency layering
+// constraints require that the `AbslParseFlag()` overload be declared and
+// defined in the flags library itself rather than here. The
+// `AbslUnparseFlag()` overload is defined there as well for consistency.
+//
+// absl::LogSeverity Flag String Representation
+//
+// An `absl::LogSeverity` has a string representation used for parsing
+// command-line flags based on the enumerator name (e.g. `kFatal`) or
+// its unprefixed name (without the `k`) in any case-insensitive form. (E.g.
+// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an
+// unprefixed string representation in all caps (e.g. "FATAL") or an integer.
+//
+// Additionally, the parser accepts arbitrary integers (as if the type were
+// `int`).
+//
+// Examples:
+//
+//   --my_log_level=kInfo
+//   --my_log_level=INFO
+//   --my_log_level=info
+//   --my_log_level=0
+//
+// `DFATAL` and `kLogDebugFatal` are similarly accepted.
+//
+// Unparsing a flag produces the same result as `absl::LogSeverityName()` for
+// the standard levels and a base-ten integer otherwise.
+enum class LogSeverity : int {
+  kInfo = 0,
+  kWarning = 1,
+  kError = 2,
+  kFatal = 3,
+};
+
+// LogSeverities()
+//
+// Returns an iterable of all standard `absl::LogSeverity` values, ordered from
+// least to most severe.
+constexpr std::array<absl::LogSeverity, 4> LogSeverities() {
+  return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning,
+           absl::LogSeverity::kError, absl::LogSeverity::kFatal}};
+}
+
+// `absl::kLogDebugFatal` equals `absl::LogSeverity::kFatal` in debug builds
+// (i.e. when `NDEBUG` is not defined) and `absl::LogSeverity::kError`
+// otherwise. Avoid ODR-using this variable as it has internal linkage and thus
+// distinct storage in different TUs.
+#ifdef NDEBUG
+static constexpr absl::LogSeverity kLogDebugFatal = absl::LogSeverity::kError;
+#else
+static constexpr absl::LogSeverity kLogDebugFatal = absl::LogSeverity::kFatal;
+#endif
+
+// LogSeverityName()
+//
+// Returns the all-caps string representation (e.g. "INFO") of the specified
+// severity level if it is one of the standard levels and "UNKNOWN" otherwise.
+constexpr const char* LogSeverityName(absl::LogSeverity s) {
+  switch (s) {
+    case absl::LogSeverity::kInfo: return "INFO";
+    case absl::LogSeverity::kWarning: return "WARNING";
+    case absl::LogSeverity::kError: return "ERROR";
+    case absl::LogSeverity::kFatal: return "FATAL";
+  }
+  return "UNKNOWN";
+}
+
+// NormalizeLogSeverity()
+//
+// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
+// normalize to `kError` (**NOT** `kFatal`).
+constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
+  absl::LogSeverity n = s;
+  if (n < absl::LogSeverity::kInfo) n = absl::LogSeverity::kInfo;
+  if (n > absl::LogSeverity::kFatal) n = absl::LogSeverity::kError;
+  return n;
+}
+constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
+  return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
+}
+
+// operator<<
+//
+// The exact representation of a streamed `absl::LogSeverity` is deliberately
+// unspecified; do not rely on it.
+std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
+
+// Enums representing a lower bound for LogSeverity. APIs that only operate on
+// messages of at least a certain level (for example, `SetMinLogLevel()`) use
+// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is
+// a level above all threshold levels and therefore no log message will
+// ever meet this threshold.
+enum class LogSeverityAtLeast : int {
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+  kInfinity = 1000,
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s);
+
+// Enums representing an upper bound for LogSeverity. APIs that only operate on
+// messages of at most a certain level (for example, buffer all messages at or
+// below a certain level) use this type to specify that level.
+// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold
+// levels and therefore will exclude all log messages.
+enum class LogSeverityAtMost : int {
+  kNegativeInfinity = -1000,
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s);
+
+#define COMPOP(op1, op2, T)                                         \
+  constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \
+    return static_cast<absl::LogSeverity>(lhs) op1 rhs;             \
+  }                                                                 \
+  constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \
+    return lhs op2 static_cast<absl::LogSeverity>(rhs);             \
+  }
+
+// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
+// `LogSeverityAtMost` are only supported in one direction.
+// Valid checks are:
+//   LogSeverity >= LogSeverityAtLeast
+//   LogSeverity < LogSeverityAtLeast
+//   LogSeverity <= LogSeverityAtMost
+//   LogSeverity > LogSeverityAtMost
+COMPOP(>, <, LogSeverityAtLeast)
+COMPOP(<=, >=, LogSeverityAtLeast)
+COMPOP(<, >, LogSeverityAtMost)
+COMPOP(>=, <=, LogSeverityAtMost)
+#undef COMPOP
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_LOG_SEVERITY_H_
diff --git a/third_party/absl/absl/base/log_severity_test.cc b/third_party/absl/absl/base/log_severity_test.cc
new file mode 100644
index 000000000..3394ecd79
--- /dev/null
+++ b/third_party/absl/absl/base/log_severity_test.cc
@@ -0,0 +1,251 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/log_severity.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/flags/internal/flag.h" +#include "absl/flags/marshalling.h" +#include "absl/strings/str_cat.h" + +namespace { +using ::testing::Eq; +using ::testing::IsFalse; +using ::testing::IsTrue; +using ::testing::TestWithParam; +using ::testing::Values; + +template +std::string StreamHelper(T value) { + std::ostringstream stream; + stream << value; + return stream.str(); +} + +TEST(StreamTest, Works) { + EXPECT_THAT(StreamHelper(static_cast(-100)), + Eq("absl::LogSeverity(-100)")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kInfo), Eq("INFO")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kWarning), Eq("WARNING")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kError), Eq("ERROR")); + EXPECT_THAT(StreamHelper(absl::LogSeverity::kFatal), Eq("FATAL")); + EXPECT_THAT(StreamHelper(static_cast(4)), + Eq("absl::LogSeverity(4)")); +} + +static_assert(absl::flags_internal::FlagUseValueAndInitBitStorage< + absl::LogSeverity>::value, + "Flags of type absl::LogSeverity ought to be lock-free."); + +using ParseFlagFromOutOfRangeIntegerTest = TestWithParam; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromOutOfRangeIntegerTest, + Values(static_cast(std::numeric_limits::min()) - 1, + static_cast(std::numeric_limits::max()) + 1)); +TEST_P(ParseFlagFromOutOfRangeIntegerTest, ReturnsError) { + const std::string to_parse = absl::StrCat(GetParam()); + absl::LogSeverity value; + std::string error; + EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; +} + +using ParseFlagFromAlmostOutOfRangeIntegerTest = TestWithParam; +INSTANTIATE_TEST_SUITE_P(Instantiation, + ParseFlagFromAlmostOutOfRangeIntegerTest, + Values(std::numeric_limits::min(), + std::numeric_limits::max())); +TEST_P(ParseFlagFromAlmostOutOfRangeIntegerTest, YieldsExpectedValue) { + const auto expected = static_cast(GetParam()); + const std::string to_parse = absl::StrCat(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromIntegerMatchingEnumeratorTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromIntegerMatchingEnumeratorTest, + Values(std::make_tuple("0", absl::LogSeverity::kInfo), + std::make_tuple(" 0", absl::LogSeverity::kInfo), + std::make_tuple("-0", absl::LogSeverity::kInfo), + std::make_tuple("+0", absl::LogSeverity::kInfo), + std::make_tuple("00", absl::LogSeverity::kInfo), + std::make_tuple("0 ", absl::LogSeverity::kInfo), + std::make_tuple("0x0", absl::LogSeverity::kInfo), + std::make_tuple("1", absl::LogSeverity::kWarning), + std::make_tuple("+1", absl::LogSeverity::kWarning), + std::make_tuple("2", absl::LogSeverity::kError), + std::make_tuple("3", absl::LogSeverity::kFatal))); +TEST_P(ParseFlagFromIntegerMatchingEnumeratorTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const absl::LogSeverity expected = std::get<1>(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromOtherIntegerTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P(Instantiation, 
ParseFlagFromOtherIntegerTest, + Values(std::make_tuple("-1", -1), + std::make_tuple("4", 4), + std::make_tuple("010", 10), + std::make_tuple("0x10", 16))); +TEST_P(ParseFlagFromOtherIntegerTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const auto expected = static_cast(std::get<1>(GetParam())); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromEnumeratorTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, ParseFlagFromEnumeratorTest, + Values(std::make_tuple("INFO", absl::LogSeverity::kInfo), + std::make_tuple("info", absl::LogSeverity::kInfo), + std::make_tuple("kInfo", absl::LogSeverity::kInfo), + std::make_tuple("iNfO", absl::LogSeverity::kInfo), + std::make_tuple("kInFo", absl::LogSeverity::kInfo), + std::make_tuple("WARNING", absl::LogSeverity::kWarning), + std::make_tuple("warning", absl::LogSeverity::kWarning), + std::make_tuple("kWarning", absl::LogSeverity::kWarning), + std::make_tuple("WaRnInG", absl::LogSeverity::kWarning), + std::make_tuple("KwArNiNg", absl::LogSeverity::kWarning), + std::make_tuple("ERROR", absl::LogSeverity::kError), + std::make_tuple("error", absl::LogSeverity::kError), + std::make_tuple("kError", absl::LogSeverity::kError), + std::make_tuple("eRrOr", absl::LogSeverity::kError), + std::make_tuple("kErRoR", absl::LogSeverity::kError), + std::make_tuple("FATAL", absl::LogSeverity::kFatal), + std::make_tuple("fatal", absl::LogSeverity::kFatal), + std::make_tuple("kFatal", absl::LogSeverity::kFatal), + std::make_tuple("FaTaL", absl::LogSeverity::kFatal), + std::make_tuple("KfAtAl", absl::LogSeverity::kFatal), + std::make_tuple("DFATAL", absl::kLogDebugFatal), + std::make_tuple("dfatal", absl::kLogDebugFatal), + std::make_tuple("kLogDebugFatal", absl::kLogDebugFatal), + std::make_tuple("dFaTaL", absl::kLogDebugFatal), + std::make_tuple("kLoGdEbUgFaTaL", absl::kLogDebugFatal))); +TEST_P(ParseFlagFromEnumeratorTest, YieldsExpectedValue) { + const absl::string_view to_parse = std::get<0>(GetParam()); + const absl::LogSeverity expected = std::get<1>(GetParam()); + absl::LogSeverity value; + std::string error; + ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; + EXPECT_THAT(value, Eq(expected)); +} + +using ParseFlagFromGarbageTest = TestWithParam; +INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromGarbageTest, + Values("", "\0", " ", "garbage", "kkinfo", "I", + "kDFATAL", "LogDebugFatal", "lOgDeBuGfAtAl")); +TEST_P(ParseFlagFromGarbageTest, ReturnsError) { + const absl::string_view to_parse = GetParam(); + absl::LogSeverity value; + std::string error; + EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; +} + +using UnparseFlagToEnumeratorTest = + TestWithParam>; +INSTANTIATE_TEST_SUITE_P( + Instantiation, UnparseFlagToEnumeratorTest, + Values(std::make_tuple(absl::LogSeverity::kInfo, "INFO"), + std::make_tuple(absl::LogSeverity::kWarning, "WARNING"), + std::make_tuple(absl::LogSeverity::kError, "ERROR"), + std::make_tuple(absl::LogSeverity::kFatal, "FATAL"))); +TEST_P(UnparseFlagToEnumeratorTest, ReturnsExpectedValueAndRoundTrips) { + const absl::LogSeverity to_unparse = std::get<0>(GetParam()); + const absl::string_view expected = std::get<1>(GetParam()); + const std::string stringified_value = absl::UnparseFlag(to_unparse); + EXPECT_THAT(stringified_value, Eq(expected)); + absl::LogSeverity reparsed_value; + 
std::string error; + EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error), + IsTrue()); + EXPECT_THAT(reparsed_value, Eq(to_unparse)); +} + +using UnparseFlagToOtherIntegerTest = TestWithParam; +INSTANTIATE_TEST_SUITE_P(Instantiation, UnparseFlagToOtherIntegerTest, + Values(std::numeric_limits::min(), -1, 4, + std::numeric_limits::max())); +TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) { + const absl::LogSeverity to_unparse = + static_cast(GetParam()); + const std::string expected = absl::StrCat(GetParam()); + const std::string stringified_value = absl::UnparseFlag(to_unparse); + EXPECT_THAT(stringified_value, Eq(expected)); + absl::LogSeverity reparsed_value; + std::string error; + EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error), + IsTrue()); + EXPECT_THAT(reparsed_value, Eq(to_unparse)); +} + +TEST(LogThresholdTest, LogSeverityAtLeastTest) { + EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal); + EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo); + + EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError); + EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo); +} + +TEST(LogThresholdTest, LogSeverityAtMostTest) { + EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning); + EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal); + + EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError); + EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError); +} + +TEST(LogThresholdTest, Extremes) { + EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity); + EXPECT_GT(absl::LogSeverity::kInfo, + absl::LogSeverityAtMost::kNegativeInfinity); +} + +TEST(LogThresholdTest, Output) { + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning), + Eq(">=WARNING")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity), + Eq("INFINITY")); + + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL")); + EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity), + Eq("NEGATIVE_INFINITY")); +} + +} // namespace diff --git a/third_party/absl/absl/base/macros.h b/third_party/absl/absl/base/macros.h new file mode 100644 index 000000000..f33cd1927 --- /dev/null +++ b/third_party/absl/absl/base/macros.h @@ -0,0 +1,141 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
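The parameterized tests above pin down the flag syntax for `absl::LogSeverity`: enumerator names parse case-insensitively with or without the leading `k`, bare integers parse as if the type were `int`, and unparsing produces the all-caps name for the standard levels. A minimal sketch of that round trip, not part of the vendored patch (it additionally requires linking the absl flags library, which provides `absl::ParseFlag` and `absl::UnparseFlag`):

#include <string>

#include "absl/base/log_severity.h"
#include "absl/flags/marshalling.h"

bool RoundTripDemo() {
  absl::LogSeverity value;
  std::string error;
  if (!absl::ParseFlag("warning", &value, &error)) return false;  // kWarning
  if (!absl::ParseFlag("3", &value, &error)) return false;        // kFatal
  // Standard levels unparse to their all-caps names.
  return absl::UnparseFlag(value) == "FATAL";
}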
+//
+// -----------------------------------------------------------------------------
+// File: macros.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the set of language macros used within Abseil code.
+// For the set of macros used to determine supported compilers and platforms,
+// see absl/base/config.h instead.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems. Before making
+// any changes here, make sure that you're not breaking any platforms.
+
+#ifndef ABSL_BASE_MACROS_H_
+#define ABSL_BASE_MACROS_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// ABSL_ARRAYSIZE()
+//
+// Returns the number of elements in an array as a compile-time constant, which
+// can be used in defining new arrays. If you use this macro on a pointer by
+// mistake, you will get a compile-time error.
+#define ABSL_ARRAYSIZE(array) \
+  (sizeof(::absl::macros_internal::ArraySizeHelper(array)))
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace macros_internal {
+// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
+// The function doesn't need a definition, as we only use its type.
+template <typename T, size_t N>
+auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
+}  // namespace macros_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// ABSL_BAD_CALL_IF()
+//
+// Used on a function overload to trap bad calls: any call that matches the
+// overload will cause a compile-time error. This macro uses a clang-specific
+// "enable_if" attribute, as described at
+// https://clang.llvm.org/docs/AttributeReference.html#enable-if
+//
+// Overloads which use this macro should be bracketed by
+// `#ifdef ABSL_BAD_CALL_IF`.
+//
+// Example:
+//
+//   int isdigit(int c);
+//   #ifdef ABSL_BAD_CALL_IF
+//   int isdigit(int c)
+//       ABSL_BAD_CALL_IF(c <= -1 || c > 255,
+//                        "'c' must have the value of an unsigned char or EOF");
+//   #endif  // ABSL_BAD_CALL_IF
+#if ABSL_HAVE_ATTRIBUTE(enable_if)
+#define ABSL_BAD_CALL_IF(expr, msg) \
+  __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
+#endif
+
+// ABSL_ASSERT()
+//
+// In C++11, `assert` can't be used portably within constexpr functions.
+// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
+// functions. Example:
+//
+// constexpr double Divide(double a, double b) {
+//   return ABSL_ASSERT(b != 0), a / b;
+// }
+//
+// This macro is inspired by
+// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
#if defined(NDEBUG)
+#define ABSL_ASSERT(expr) \
+  (false ? static_cast<void>(expr) : static_cast<void>(0))
+#else
+#define ABSL_ASSERT(expr)                           \
+  (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+                             : [] { assert(false && #expr); }())  // NOLINT
+#endif
+
+// `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()`
+// aborts the program in release mode (when NDEBUG is defined). The
+// implementation should abort the program as quickly as possible and ideally
+// it should not be possible to ignore the abort request.
+#define ABSL_INTERNAL_HARDENING_ABORT()   \
+  do {                                    \
+    ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL(); \
+    ABSL_INTERNAL_UNREACHABLE_IMPL();     \
+  } while (false)
+
+// ABSL_HARDENING_ASSERT()
+//
+// `ABSL_HARDENING_ASSERT()` is like `ABSL_ASSERT()`, but used to implement
+// runtime assertions that should be enabled in hardened builds even when
+// `NDEBUG` is defined.
+//
+// When `NDEBUG` is not defined, `ABSL_HARDENING_ASSERT()` is identical to
+// `ABSL_ASSERT()`.
+//
+// See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on
+// hardened mode.
+#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG)
+#define ABSL_HARDENING_ASSERT(expr)                 \
+  (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+                             : [] { ABSL_INTERNAL_HARDENING_ABORT(); }())
+#else
+#define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr)
+#endif
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY try
+#define ABSL_INTERNAL_CATCH_ANY catch (...)
+#define ABSL_INTERNAL_RETHROW do { throw; } while (false)
+#else  // ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY if (true)
+#define ABSL_INTERNAL_CATCH_ANY else if (false)
+#define ABSL_INTERNAL_RETHROW do {} while (false)
+#endif  // ABSL_HAVE_EXCEPTIONS
+
+#endif  // ABSL_BASE_MACROS_H_
diff --git a/third_party/absl/absl/base/no_destructor.h b/third_party/absl/absl/base/no_destructor.h
new file mode 100644
index 000000000..d4b16a6e8
--- /dev/null
+++ b/third_party/absl/absl/base/no_destructor.h
@@ -0,0 +1,217 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: no_destructor.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the absl::NoDestructor<T> wrapper for defining a
+// static type that does not need to be destructed upon program exit. Instead,
+// such an object survives during program exit (and can be safely accessed at
+// any time).
+//
+// Objects of such type, if constructed safely and under the right conditions,
+// provide two main benefits over other alternatives:
+//
+// * Global objects not normally allowed due to concerns of destruction order
+//   (i.e. no "complex globals") can be safely allowed, provided that such
+//   objects can be constant initialized.
+// * Function scope static objects can be optimized to avoid heap allocation,
+//   pointer chasing, and allow lazy construction.
+//
+// See below for complete details.
+
+
+#ifndef ABSL_BASE_NO_DESTRUCTOR_H_
+#define ABSL_BASE_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::NoDestructor<T>
+//
+// NoDestructor<T> is a wrapper around an object of type T that behaves as an
+// object of type T but never calls T's destructor. NoDestructor<T> makes it
+// safer and/or more efficient to use such objects in static storage contexts:
+// as global or function scope static variables.
+// +// An instance of absl::NoDestructor has similar type semantics to an +// instance of T: +// +// * Constructs in the same manner as an object of type T through perfect +// forwarding. +// * Provides pointer/reference semantic access to the object of type T via +// `->`, `*`, and `get()`. +// (Note that `const NoDestructor` works like a pointer to const `T`.) +// +// An object of type NoDestructor should be defined in static storage: +// as either a global static object, or as a function scope static variable. +// +// Additionally, NoDestructor provides the following benefits: +// +// * Never calls T's destructor for the object +// * If the object is a function-local static variable, the type can be +// lazily constructed. +// +// An object of type NoDestructor is "trivially destructible" in the notion +// that its destructor is never run. Provided that an object of this type can be +// safely initialized and does not need to be cleaned up on program shutdown, +// NoDestructor allows you to define global static variables, since Google's +// C++ style guide ban on such objects doesn't apply to objects that are +// trivially destructible. +// +// Usage as Global Static Variables +// +// NoDestructor allows declaration of a global object with a non-trivial +// constructor in static storage without needing to add a destructor. +// However, such objects still need to worry about initialization order, so +// such objects should be const initialized: +// +// // Global or namespace scope. +// ABSL_CONST_INIT absl::NoDestructor reg{"foo", "bar", 8008}; +// +// Note that if your object already has a trivial destructor, you don't need to +// use NoDestructor. +// +// Usage as Function Scope Static Variables +// +// Function static objects will be lazily initialized within static storage: +// +// // Function scope. +// const std::string& MyString() { +// static const absl::NoDestructor x("foo"); +// return *x; +// } +// +// For function static variables, NoDestructor avoids heap allocation and can be +// inlined in static storage, resulting in exactly-once, thread-safe +// construction of an object, and very fast access thereafter (the cost is a few +// extra cycles). +// +// Using NoDestructor in this manner is generally better than other patterns +// which require pointer chasing: +// +// // Prefer using absl::NoDestructor instead for the static variable. +// const std::string& MyString() { +// static const std::string* x = new std::string("foo"); +// return *x; +// } +// +template +class NoDestructor { + public: + // Forwards arguments to the T's constructor: calls T(args...). + template &...), + void(NoDestructor&)>::value, + int>::type = 0> + explicit constexpr NoDestructor(Ts&&... args) + : impl_(std::forward(args)...) {} + + // Forwards copy and move construction for T. Enables usage like this: + // static NoDestructor> x{{{"1", "2", "3"}}}; + // static NoDestructor> x{{1, 2, 3}}; + explicit constexpr NoDestructor(const T& x) : impl_(x) {} + explicit constexpr NoDestructor(T&& x) + : impl_(std::move(x)) {} + + // No copying. + NoDestructor(const NoDestructor&) = delete; + NoDestructor& operator=(const NoDestructor&) = delete; + + // Pretend to be a smart pointer to T with deep constness. + // Never returns a null pointer. 
+ T& operator*() { return *get(); } + T* operator->() { return get(); } + T* get() { return impl_.get(); } + const T& operator*() const { return *get(); } + const T* operator->() const { return get(); } + const T* get() const { return impl_.get(); } + + private: + class DirectImpl { + public: + template + explicit constexpr DirectImpl(Args&&... args) + : value_(std::forward(args)...) {} + const T* get() const { return &value_; } + T* get() { return &value_; } + + private: + T value_; + }; + + class PlacementImpl { + public: + template + explicit PlacementImpl(Args&&... args) { + new (&space_) T(std::forward(args)...); + } + const T* get() const { + return Launder(reinterpret_cast(&space_)); + } + T* get() { return Launder(reinterpret_cast(&space_)); } + + private: + template + static P* Launder(P* p) { +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L + return std::launder(p); +#elif ABSL_HAVE_BUILTIN(__builtin_launder) + return __builtin_launder(p); +#else + // When `std::launder` or equivalent are not available, we rely on + // undefined behavior, which works as intended on Abseil's officially + // supported platforms as of Q3 2023. +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif + return p; +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif +#endif + } + + alignas(T) unsigned char space_[sizeof(T)]; + }; + + // If the object is trivially destructible we use a member directly to avoid + // potential once-init runtime initialization. It somewhat defeats the + // purpose of NoDestructor in this case, but this makes the class more + // friendly to generic code. + std::conditional_t::value, DirectImpl, + PlacementImpl> + impl_; +}; + +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// Provide 'Class Template Argument Deduction': the type of NoDestructor's T +// will be the same type as the argument passed to NoDestructor's constructor. +template +NoDestructor(T) -> NoDestructor; +#endif // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_NO_DESTRUCTOR_H_ diff --git a/third_party/absl/absl/base/no_destructor_benchmark.cc b/third_party/absl/absl/base/no_destructor_benchmark.cc new file mode 100644 index 000000000..5fc88f1df --- /dev/null +++ b/third_party/absl/absl/base/no_destructor_benchmark.cc @@ -0,0 +1,165 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/base/no_destructor.h" +#include "benchmark/benchmark.h" + +namespace { + +// Number of static-NoDestructor-in-a-function to exercise. +// This must be low enough not to hit template instantiation limits +// (happens around 1000). 
+constexpr int kNumObjects = 1; // set to 512 when doing benchmarks + // 1 is faster to compile: just one templated + // function instantiation + +// Size of individual objects to benchmark static-NoDestructor-in-a-function +// usage with. +constexpr int kObjSize = sizeof(void*)*1; + +// Simple object of kObjSize bytes (rounded to int). +// We benchmark complete reading of its state via Verify(). +class BM_Blob { + public: + BM_Blob(int val) { for (auto& d : data_) d = val; } + BM_Blob() : BM_Blob(-1) {} + void Verify(int val) const { // val must be the c-tor argument + for (auto& d : data_) ABSL_INTERNAL_CHECK(d == val, ""); + } + private: + int data_[kObjSize / sizeof(int) > 0 ? kObjSize / sizeof(int) : 1]; +}; + +// static-NoDestructor-in-a-function pattern instances. +// We'll instantiate kNumObjects of them. +template +const BM_Blob& NoDestrBlobFunc() { + static absl::NoDestructor x(i); + return *x; +} + +// static-heap-ptr-in-a-function pattern instances +// We'll instantiate kNumObjects of them. +template +const BM_Blob& OnHeapBlobFunc() { + static BM_Blob* x = new BM_Blob(i); + return *x; +} + +// Type for NoDestrBlobFunc or OnHeapBlobFunc. +typedef const BM_Blob& (*FuncType)(); + +// ========================================================================= // +// Simple benchmarks that read a single BM_Blob over and over, hence +// all they touch fits into L1 CPU cache: + +// Direct non-POD global variable (style guide violation) as a baseline. +static BM_Blob direct_blob(0); + +void BM_Direct(benchmark::State& state) { + for (auto s : state) { + direct_blob.Verify(0); + } +} +BENCHMARK(BM_Direct); + +void BM_NoDestr(benchmark::State& state) { + for (auto s : state) { + NoDestrBlobFunc<0>().Verify(0); + } +} +BENCHMARK(BM_NoDestr); + +void BM_OnHeap(benchmark::State& state) { + for (auto s : state) { + OnHeapBlobFunc<0>().Verify(0); + } +} +BENCHMARK(BM_OnHeap); + +// ========================================================================= // +// Benchmarks that read kNumObjects of BM_Blob over and over, hence with +// appropriate values of sizeof(BM_Blob) and kNumObjects their working set +// can exceed a given layer of CPU cache. + +// Type of benchmark to select between NoDestrBlobFunc and OnHeapBlobFunc. +enum BM_Type { kNoDestr, kOnHeap, kDirect }; + +// BlobFunc(t, i) returns the i-th function of type t. +// n must be larger than i (we'll use kNumObjects for n). +template +FuncType BlobFunc(BM_Type t, int i) { + if (i == n) { + switch (t) { + case kNoDestr: return &NoDestrBlobFunc; + case kOnHeap: return &OnHeapBlobFunc; + case kDirect: return nullptr; + } + } + return BlobFunc(t, i); +} + +template<> +FuncType BlobFunc<0>(BM_Type t, int i) { + ABSL_INTERNAL_CHECK(i == 0, ""); + switch (t) { + case kNoDestr: return &NoDestrBlobFunc<0>; + case kOnHeap: return &OnHeapBlobFunc<0>; + case kDirect: return nullptr; + } + return nullptr; +} + +// Direct non-POD global variables (style guide violation) as a baseline. +static BM_Blob direct_blobs[kNumObjects]; + +// Helper that cheaply maps benchmark iteration to randomish index in +// [0, kNumObjects). +int RandIdx(int i) { + // int64 is to avoid overflow and generating negative return values: + return (static_cast(i) * 13) % kNumObjects; +} + +// Generic benchmark working with kNumObjects for any of the possible BM_Type. 
+template +void BM_Many(benchmark::State& state) { + FuncType funcs[kNumObjects]; + for (int i = 0; i < kNumObjects; ++i) { + funcs[i] = BlobFunc(t, i); + } + if (t == kDirect) { + for (auto s : state) { + int idx = RandIdx(state.iterations()); + direct_blobs[idx].Verify(-1); + } + } else { + for (auto s : state) { + int idx = RandIdx(state.iterations()); + funcs[idx]().Verify(idx); + } + } +} + +void BM_DirectMany(benchmark::State& state) { BM_Many(state); } +void BM_NoDestrMany(benchmark::State& state) { BM_Many(state); } +void BM_OnHeapMany(benchmark::State& state) { BM_Many(state); } + +BENCHMARK(BM_DirectMany); +BENCHMARK(BM_NoDestrMany); +BENCHMARK(BM_OnHeapMany); + +} // namespace diff --git a/third_party/absl/absl/base/no_destructor_test.cc b/third_party/absl/absl/base/no_destructor_test.cc new file mode 100644 index 000000000..71693c7e2 --- /dev/null +++ b/third_party/absl/absl/base/no_destructor_test.cc @@ -0,0 +1,209 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/no_destructor.h" + +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" + +namespace { + +struct Blob { + Blob() : val(42) {} + Blob(int x, int y) : val(x + y) {} + Blob(std::initializer_list xs) { + val = 0; + for (auto& x : xs) val += x; + } + + Blob(const Blob& /*b*/) = delete; + Blob(Blob&& b) noexcept : val(b.val) { + b.moved_out = true; + } // moving is fine + + // no crash: NoDestructor indeed does not destruct (the moved-out Blob + // temporaries do get destroyed though) + ~Blob() { ABSL_INTERNAL_CHECK(moved_out, "~Blob"); } + + int val; + bool moved_out = false; +}; + +struct TypeWithDeletedDestructor { + ~TypeWithDeletedDestructor() = delete; +}; + +TEST(NoDestructorTest, DestructorNeverCalled) { + absl::NoDestructor a; + (void)a; +} + +TEST(NoDestructorTest, Noncopyable) { + using T = absl::NoDestructor; + + EXPECT_FALSE((std::is_constructible::value)); + EXPECT_FALSE((std::is_constructible::value)); + EXPECT_FALSE((std::is_constructible::value)); + EXPECT_FALSE((std::is_constructible::value)); + + EXPECT_FALSE((std::is_assignable::value)); + EXPECT_FALSE((std::is_assignable::value)); + EXPECT_FALSE((std::is_assignable::value)); + EXPECT_FALSE((std::is_assignable::value)); +} + +TEST(NoDestructorTest, Interface) { + EXPECT_TRUE(std::is_trivially_destructible>::value); + EXPECT_TRUE( + std::is_trivially_destructible>::value); + { + absl::NoDestructor b; // default c-tor + // access: *, ->, get() + EXPECT_EQ(42, (*b).val); + (*b).val = 55; + EXPECT_EQ(55, b->val); + b->val = 66; + EXPECT_EQ(66, b.get()->val); + b.get()->val = 42; // NOLINT + EXPECT_EQ(42, (*b).val); + } + { + absl::NoDestructor b(70, 7); // regular c-tor, const + EXPECT_EQ(77, (*b).val); + EXPECT_EQ(77, b->val); + EXPECT_EQ(77, b.get()->val); + } + { + const absl::NoDestructor b{ + {20, 28, 40}}; // init-list c-tor, deep const + // This only 
works in clang, not in gcc: + // const absl::NoDestructor b({20, 28, 40}); + EXPECT_EQ(88, (*b).val); + EXPECT_EQ(88, b->val); + EXPECT_EQ(88, b.get()->val); + } +} + +TEST(NoDestructorTest, SfinaeRegressionAbstractArg) { + struct Abstract { + virtual ~Abstract() = default; + virtual int foo() const = 0; + }; + + struct Concrete : Abstract { + int foo() const override { return 17; } + }; + + struct UsesAbstractInConstructor { + explicit UsesAbstractInConstructor(const Abstract& abstract) + : i(abstract.foo()) {} + int i; + }; + + Concrete input; + absl::NoDestructor foo1(input); + EXPECT_EQ(foo1->i, 17); + absl::NoDestructor foo2( + static_cast(input)); + EXPECT_EQ(foo2->i, 17); +} + +// ========================================================================= // + +std::string* Str0() { + static absl::NoDestructor x; + return x.get(); +} + +extern const std::string& Str2(); + +const char* Str1() { + static absl::NoDestructor x(Str2() + "_Str1"); + return x->c_str(); +} + +const std::string& Str2() { + static absl::NoDestructor x("Str2"); + return *x; +} + +const std::string& Str2Copy() { + // Exercise copy construction + static absl::NoDestructor x(Str2()); + return *x; +} + +typedef std::array MyArray; +const MyArray& Array() { + static absl::NoDestructor x{{{"foo", "bar", "baz"}}}; + // This only works in clang, not in gcc: + // static absl::NoDestructor x({{"foo", "bar", "baz"}}); + return *x; +} + +typedef std::vector MyVector; +const MyVector& Vector() { + static absl::NoDestructor x{{1, 2, 3}}; + return *x; +} + +const int& Int() { + static absl::NoDestructor x; + return *x; +} + +TEST(NoDestructorTest, StaticPattern) { + EXPECT_TRUE( + std::is_trivially_destructible>::value); + EXPECT_TRUE( + std::is_trivially_destructible>::value); + EXPECT_TRUE( + std::is_trivially_destructible>::value); + EXPECT_TRUE(std::is_trivially_destructible>::value); + + EXPECT_EQ(*Str0(), ""); + Str0()->append("foo"); + EXPECT_EQ(*Str0(), "foo"); + + EXPECT_EQ(std::string(Str1()), "Str2_Str1"); + + EXPECT_EQ(Str2(), "Str2"); + EXPECT_EQ(Str2Copy(), "Str2"); + + EXPECT_THAT(Array(), testing::ElementsAre("foo", "bar", "baz")); + + EXPECT_THAT(Vector(), testing::ElementsAre(1, 2, 3)); + + EXPECT_EQ(0, Int()); // should get zero-initialized +} + +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// This would fail to compile if Class Template Argument Deduction was not +// provided for absl::NoDestructor. +TEST(NoDestructorTest, ClassTemplateArgumentDeduction) { + absl::NoDestructor i(1); + static_assert(std::is_same>::value, + "Expected deduced type to be int."); +} +#endif + +} // namespace diff --git a/third_party/absl/absl/base/nullability.h b/third_party/absl/absl/base/nullability.h new file mode 100644 index 000000000..6f49b6f53 --- /dev/null +++ b/third_party/absl/absl/base/nullability.h @@ -0,0 +1,224 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
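The static-in-a-function pattern exercised by no_destructor_test.cc above is the primary intended use of `absl::NoDestructor`: construction happens exactly once and thread-safely on first call, there is no heap allocation, and the destructor never runs, so the returned reference stays valid even during program shutdown. A minimal sketch, not part of the vendored patch:

#include <string>

#include "absl/base/no_destructor.h"

// Constructed on first call; never destroyed, so safe to use at exit.
const std::string& Greeting() {
  static const absl::NoDestructor<std::string> greeting("hello, world");
  return *greeting;
}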
+// +// ----------------------------------------------------------------------------- +// File: nullability.h +// ----------------------------------------------------------------------------- +// +// This header file defines a set of "templated annotations" for designating the +// expected nullability of pointers. These annotations allow you to designate +// pointers in one of three classification states: +// +// * "Non-null" (for pointers annotated `Nonnull`), indicating that it is +// invalid for the given pointer to ever be null. +// * "Nullable" (for pointers annotated `Nullable`), indicating that it is +// valid for the given pointer to be null. +// * "Unknown" (for pointers annotated `NullabilityUnknown`), indicating +// that the given pointer has not been yet classified as either nullable or +// non-null. This is the default state of unannotated pointers. +// +// NOTE: unannotated pointers implicitly bear the annotation +// `NullabilityUnknown`; you should rarely, if ever, see this annotation used +// in the codebase explicitly. +// +// ----------------------------------------------------------------------------- +// Nullability and Contracts +// ----------------------------------------------------------------------------- +// +// These nullability annotations allow you to more clearly specify contracts on +// software components by narrowing the *preconditions*, *postconditions*, and +// *invariants* of pointer state(s) in any given interface. It then depends on +// context who is responsible for fulfilling the annotation's requirements. +// +// For example, a function may receive a pointer argument. Designating that +// pointer argument as "non-null" tightens the precondition of the contract of +// that function. It is then the responsibility of anyone calling such a +// function to ensure that the passed pointer is not null. +// +// Similarly, a function may have a pointer as a return value. Designating that +// return value as "non-null" tightens the postcondition of the contract of that +// function. In this case, however, it is the responsibility of the function +// itself to ensure that the returned pointer is not null. +// +// Clearly defining these contracts allows providers (and consumers) of such +// pointers to have more confidence in their null state. If a function declares +// a return value as "non-null", for example, the caller should not need to +// check whether the returned value is `nullptr`; it can simply assume the +// pointer is valid. +// +// Of course most interfaces already have expectations on the nullability state +// of pointers, and these expectations are, in effect, a contract; often, +// however, those contracts are either poorly or partially specified, assumed, +// or misunderstood. These nullability annotations are designed to allow you to +// formalize those contracts within the codebase. +// +// ----------------------------------------------------------------------------- +// Using Nullability Annotations +// ----------------------------------------------------------------------------- +// +// It is important to note that these annotations are not distinct strong +// *types*. They are alias templates defined to be equal to the underlying +// pointer type. A pointer annotated `Nonnull`, for example, is simply a +// pointer of type `T*`. Each annotation acts as a form of documentation about +// the contract for the given pointer. 
Each annotation requires providers or +// consumers of these pointers across API boundaries to take appropriate steps +// when setting or using these pointers: +// +// * "Non-null" pointers should never be null. It is the responsibility of the +// provider of this pointer to ensure that the pointer may never be set to +// null. Consumers of such pointers can treat such pointers as non-null. +// * "Nullable" pointers may or may not be null. Consumers of such pointers +// should precede any usage of that pointer (e.g. a dereference operation) +// with a a `nullptr` check. +// * "Unknown" pointers may be either "non-null" or "nullable" but have not been +// definitively determined to be in either classification state. Providers of +// such pointers across API boundaries should determine -- over time -- to +// annotate the pointer in either of the above two states. Consumers of such +// pointers across an API boundary should continue to treat such pointers as +// they currently do. +// +// Example: +// +// // PaySalary() requires the passed pointer to an `Employee` to be non-null. +// void PaySalary(absl::Nonnull e) { +// pay(e->salary); // OK to dereference +// } +// +// // CompleteTransaction() guarantees the returned pointer to an `Account` to +// // be non-null. +// absl::Nonnull balance CompleteTransaction(double fee) { +// ... +// } +// +// // Note that specifying a nullability annotation does not prevent someone +// // from violating the contract: +// +// Nullable find(Map& employees, std::string_view name); +// +// void g(Map& employees) { +// Employee *e = find(employees, "Pat"); +// // `e` can now be null. +// PaySalary(e); // Violates contract, but compiles! +// } +// +// Nullability annotations, in other words, are useful for defining and +// narrowing contracts; *enforcement* of those contracts depends on use and any +// additional (static or dynamic analysis) tooling. +// +// NOTE: The "unknown" annotation state indicates that a pointer's contract has +// not yet been positively identified. The unknown state therefore acts as a +// form of documentation of your technical debt, and a codebase that adopts +// nullability annotations should aspire to annotate every pointer as either +// "non-null" or "nullable". +// +// ----------------------------------------------------------------------------- +// Applicability of Nullability Annotations +// ----------------------------------------------------------------------------- +// +// By default, nullability annotations are applicable to raw and smart +// pointers. User-defined types can indicate compatibility with nullability +// annotations by providing an `absl_nullability_compatible` nested type. The +// actual definition of this inner type is not relevant as it is used merely as +// a marker. It is common to use a using declaration of +// `absl_nullability_compatible` set to void. +// +// // Example: +// struct MyPtr { +// using absl_nullability_compatible = void; +// ... +// }; +// +// DISCLAIMER: +// =========================================================================== +// These nullability annotations are primarily a human readable signal about the +// intended contract of the pointer. They are not *types* and do not currently +// provide any correctness guarantees. For example, a pointer annotated as +// `Nonnull` is *not guaranteed* to be non-null, and the compiler won't +// alert or prevent assignment of a `Nullable` to a `Nonnull`. 
+// =========================================================================== +#ifndef ABSL_BASE_NULLABILITY_H_ +#define ABSL_BASE_NULLABILITY_H_ + +#include "absl/base/internal/nullability_impl.h" + +namespace absl { + +// absl::Nonnull +// +// The indicated pointer is never null. It is the responsibility of the provider +// of this pointer across an API boundary to ensure that the pointer is never be +// set to null. Consumers of this pointer across an API boundary may safely +// dereference the pointer. +// +// Example: +// +// // `employee` is designated as not null. +// void PaySalary(absl::Nonnull employee) { +// pay(*employee); // OK to dereference +// } +template +using Nonnull = nullability_internal::NonnullImpl; + +// absl::Nullable +// +// The indicated pointer may, by design, be either null or non-null. Consumers +// of this pointer across an API boundary should perform a `nullptr` check +// before performing any operation using the pointer. +// +// Example: +// +// // `employee` may be null. +// void PaySalary(absl::Nullable employee) { +// if (employee != nullptr) { +// Pay(*employee); // OK to dereference +// } +// } +template +using Nullable = nullability_internal::NullableImpl; + +// absl::NullabilityUnknown (default) +// +// The indicated pointer has not yet been determined to be definitively +// "non-null" or "nullable." Providers of such pointers across API boundaries +// should, over time, annotate such pointers as either "non-null" or "nullable." +// Consumers of these pointers across an API boundary should treat such pointers +// with the same caution they treat currently unannotated pointers. Most +// existing code will have "unknown" pointers, which should eventually be +// migrated into one of the above two nullability states: `Nonnull` or +// `Nullable`. +// +// NOTE: Because this annotation is the global default state, pointers without +// any annotation are assumed to have "unknown" semantics. This assumption is +// designed to minimize churn and reduce clutter within the codebase. +// +// Example: +// +// // `employee`s nullability state is unknown. +// void PaySalary(absl::NullabilityUnknown employee) { +// Pay(*employee); // Potentially dangerous. API provider should investigate. +// } +// +// Note that a pointer without an annotation, by default, is assumed to have the +// annotation `NullabilityUnknown`. +// +// // `employee`s nullability state is unknown. +// void PaySalary(Employee* employee) { +// Pay(*employee); // Potentially dangerous. API provider should investigate. +// } +template +using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl; + +} // namespace absl + +#endif // ABSL_BASE_NULLABILITY_H_ diff --git a/third_party/absl/absl/base/nullability_test.cc b/third_party/absl/absl/base/nullability_test.cc new file mode 100644 index 000000000..028ea6ca1 --- /dev/null +++ b/third_party/absl/absl/base/nullability_test.cc @@ -0,0 +1,129 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
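The three alias templates declared above are pure pass-throughs: `absl::Nonnull<int*>` is just `int*`, so the annotations document a contract without changing behavior. A minimal sketch of annotating a small API, not part of the vendored patch:

#include "absl/base/nullability.h"

// Contract: callers must never pass null; the body may dereference freely.
int Deref(absl::Nonnull<int*> p) { return *p; }

// Contract: null is a legal input, so the body checks before dereferencing.
int DerefOrZero(absl::Nullable<int*> p) { return p != nullptr ? *p : 0; }

// Not yet classified; treat as cautiously as an unannotated pointer.
int DerefUnknown(absl::NullabilityUnknown<int*> p) { return p ? *p : 0; }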
+ +#include "absl/base/nullability.h" + +#include <memory> +#include <type_traits> +#include <utility> + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" + +namespace { +using ::absl::Nonnull; +using ::absl::NullabilityUnknown; +using ::absl::Nullable; + +void funcWithNonnullArg(Nonnull<int*> /*arg*/) {} +template <typename T> +void funcWithDeducedNonnullArg(Nonnull<T*> /*arg*/) {} + +TEST(NonnullTest, NonnullArgument) { + int var = 0; + funcWithNonnullArg(&var); + funcWithDeducedNonnullArg(&var); +} + +Nonnull<int*> funcWithNonnullReturn() { + static int var = 0; + return &var; +} + +TEST(NonnullTest, NonnullReturn) { + auto var = funcWithNonnullReturn(); + (void)var; +} + +TEST(PassThroughTest, PassesThroughRawPointerToInt) { + EXPECT_TRUE((std::is_same<Nonnull<int*>, int*>::value)); + EXPECT_TRUE((std::is_same<Nullable<int*>, int*>::value)); + EXPECT_TRUE((std::is_same<NullabilityUnknown<int*>, int*>::value)); +} + +TEST(PassThroughTest, PassesThroughRawPointerToVoid) { + EXPECT_TRUE((std::is_same<Nonnull<void*>, void*>::value)); + EXPECT_TRUE((std::is_same<Nullable<void*>, void*>::value)); + EXPECT_TRUE((std::is_same<NullabilityUnknown<void*>, void*>::value)); +} + +TEST(PassThroughTest, PassesThroughUniquePointerToInt) { + using T = std::unique_ptr<int>; + EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value)); + EXPECT_TRUE((std::is_same<Nullable<T>, T>::value)); + EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value)); +} + +TEST(PassThroughTest, PassesThroughSharedPointerToInt) { + using T = std::shared_ptr<int>; + EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value)); + EXPECT_TRUE((std::is_same<Nullable<T>, T>::value)); + EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value)); +} + +TEST(PassThroughTest, PassesThroughSharedPointerToVoid) { + using T = std::shared_ptr<void>; + EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value)); + EXPECT_TRUE((std::is_same<Nullable<T>, T>::value)); + EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value)); +} + +TEST(PassThroughTest, PassesThroughPointerToMemberObject) { + using T = decltype(&std::pair<int, int>::first); + EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value)); + EXPECT_TRUE((std::is_same<Nullable<T>, T>::value)); + EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value)); +} + +TEST(PassThroughTest, PassesThroughPointerToMemberFunction) { + using T = decltype(&std::unique_ptr<int>::reset); + EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value)); + EXPECT_TRUE((std::is_same<Nullable<T>, T>::value)); + EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value)); +} + +} // namespace + +// Nullable ADL lookup test +namespace util { +// Helper for NullableAdlTest. Returns true, denoting that argument-dependent +// lookup found this implementation of DidAdlWin. Must be in namespace +// util itself, not a nested anonymous namespace. +template <typename T> +bool DidAdlWin(T*) { + return true; +} + +// Because this type is defined in namespace util, an unqualified call to +// DidAdlWin with a pointer to MakeAdlWin will find the above implementation. +struct MakeAdlWin {}; +} // namespace util + +namespace { +// Returns false, denoting that ADL did not inspect namespace util. If it +// had, the better match (T*) above would have won out over the (...) here. +bool DidAdlWin(...) { return false; } + +TEST(NullableAdlTest, NullableAddsNothingToArgumentDependentLookup) { + // Treatment: Nullable<int*> contributes nothing to ADL because + // int* itself doesn't. + EXPECT_FALSE(DidAdlWin((int*)nullptr)); + EXPECT_FALSE(DidAdlWin((Nullable<int*>)nullptr)); + + // Control: Argument-dependent lookup does find the implementation in + // namespace util when the underlying pointee type resides there.
+ EXPECT_TRUE(DidAdlWin((util::MakeAdlWin*)nullptr)); + EXPECT_TRUE(DidAdlWin((Nullable<util::MakeAdlWin*>)nullptr)); +} +} // namespace diff --git a/third_party/absl/absl/base/optimization.h b/third_party/absl/absl/base/optimization.h new file mode 100644 index 000000000..f98599589 --- /dev/null +++ b/third_party/absl/absl/base/optimization.h @@ -0,0 +1,305 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: optimization.h +// ----------------------------------------------------------------------------- +// +// This header file defines portable macros for performance optimization. + +#ifndef ABSL_BASE_OPTIMIZATION_H_ +#define ABSL_BASE_OPTIMIZATION_H_ + +#include <assert.h> + +#include "absl/base/config.h" +#include "absl/base/options.h" + +// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION +// +// Instructs the compiler to avoid optimizing tail-call recursion. This macro is +// useful when you wish to preserve the existing function order within a stack +// trace for logging, debugging, or profiling purposes. +// +// Example: +// +// int f() { +// int result = g(); +// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); +// return result; +// } +#if defined(__pnacl__) +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#elif defined(__clang__) +// Clang will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(__GNUC__) +// GCC will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(_MSC_VER) +#include <intrin.h> +// The __nop() intrinsic blocks the optimization. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() +#else +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#endif + +// ABSL_CACHELINE_SIZE +// +// Explicitly defines the size of the L1 cache for purposes of alignment. +// Setting the cacheline size allows you to specify that certain objects be +// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations. +// (See below.) +// +// NOTE: this macro should be replaced with the following C++17 features, when +// those are generally available: +// +// * `std::hardware_constructive_interference_size` +// * `std::hardware_destructive_interference_size` +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +#if defined(__GNUC__) +// Cache line alignment +#if defined(__i386__) || defined(__x86_64__) +#define ABSL_CACHELINE_SIZE 64 +#elif defined(__powerpc64__) +#define ABSL_CACHELINE_SIZE 128 +#elif defined(__aarch64__) +// We would need to read special register ctr_el0 to find out L1 dcache size. +// This value is a good estimate based on a real aarch64 machine.
+#define ABSL_CACHELINE_SIZE 64 +#elif defined(__arm__) +// Cache line sizes for ARM: These values are not strictly correct since +// cache line sizes depend on implementations, not architectures. There +// are even implementations with cache line sizes configurable at boot +// time. +#if defined(__ARM_ARCH_5T__) +#define ABSL_CACHELINE_SIZE 32 +#elif defined(__ARM_ARCH_7A__) +#define ABSL_CACHELINE_SIZE 64 +#endif +#endif +#endif + +#ifndef ABSL_CACHELINE_SIZE +// A reasonable default guess. Note that overestimates tend to waste more +// space, while underestimates tend to waste more time. +#define ABSL_CACHELINE_SIZE 64 +#endif + +// ABSL_CACHELINE_ALIGNED +// +// Indicates that the declared object should be cache aligned using +// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to +// load a set of related objects in the L1 cache for performance improvements. +// Cacheline aligning objects properly allows constructive memory sharing and +// prevents destructive (or "false") memory sharing. +// +// NOTE: callers should replace uses of this macro with `alignas()` using +// `std::hardware_constructive_interference_size` and/or +// `std::hardware_destructive_interference_size` when C++17 becomes available to +// them. +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +// +// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__` +// or `__declspec` attribute. For compilers where this is not known to work, +// the macro expands to nothing. +// +// No further guarantees are made here. The result of applying the macro +// to variables and types is always implementation-defined. +// +// WARNING: It is easy to use this attribute incorrectly, even to the point +// of causing bugs that are difficult to diagnose, crash, etc. It does not +// by itself guarantee that objects are aligned to a cache line. +// +// NOTE: Some compilers are picky about the locations of annotations such as +// this attribute, so prefer to put it at the beginning of your declaration. +// For example, +// +// ABSL_CACHELINE_ALIGNED static Foo* foo = ... +// +// class ABSL_CACHELINE_ALIGNED Bar { ... +// +// Recommendations: +// +// 1) Consult compiler documentation; this comment is not kept in sync as +// toolchains evolve. +// 2) Verify your use has the intended effect. This often requires inspecting +// the generated machine code. +// 3) Prefer applying this attribute to individual variables. Avoid +// applying it to types. This tends to localize the effect. +#if defined(__clang__) || defined(__GNUC__) +#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE))) +#elif defined(_MSC_VER) +#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE)) +#else +#define ABSL_CACHELINE_ALIGNED +#endif + +// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE +// +// Enables the compiler to prioritize compilation using static analysis for +// likely paths within a boolean branch. +// +// Example: +// +// if (ABSL_PREDICT_TRUE(expression)) { +// return result; // Faster if more likely +// } else { +// return 0; +// } +// +// Compilers can use the information that a certain branch is not likely to be +// taken (for instance, a CHECK failure) to optimize for the common case in +// the absence of better information (i.e. compiling gcc with `-fprofile-arcs`). +// +// Recommendation: Modern CPUs dynamically predict branch execution paths, +// typically with accuracy greater than 97%.
As a result, annotating every +// branch in a codebase is likely counterproductive; however, annotating +// specific branches that are both hot and consistently mispredicted is likely +// to yield performance improvements. +#if ABSL_HAVE_BUILTIN(__builtin_expect) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_PREDICT_FALSE(x) (__builtin_expect(false || (x), false)) +#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) +#else +#define ABSL_PREDICT_FALSE(x) (x) +#define ABSL_PREDICT_TRUE(x) (x) +#endif + +// `ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL()` aborts the program in the fastest +// possible way, with no attempt at logging. One use is to implement hardening +// aborts with ABSL_OPTION_HARDENED. Since this is an internal symbol, it +// should not be used directly outside of Abseil. +#if ABSL_HAVE_BUILTIN(__builtin_trap) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL() __builtin_trap() +#else +#define ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL() abort() +#endif + +// `ABSL_INTERNAL_UNREACHABLE_IMPL()` is the platform specific directive to +// indicate that a statement is unreachable, and to allow the compiler to +// optimize accordingly. Clients should use `ABSL_UNREACHABLE()`, which is +// defined below. +#if defined(__cpp_lib_unreachable) && __cpp_lib_unreachable >= 202202L +#define ABSL_INTERNAL_UNREACHABLE_IMPL() std::unreachable() +#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_INTERNAL_UNREACHABLE_IMPL() __builtin_unreachable() +#elif ABSL_HAVE_BUILTIN(__builtin_assume) +#define ABSL_INTERNAL_UNREACHABLE_IMPL() __builtin_assume(false) +#elif defined(_MSC_VER) +#define ABSL_INTERNAL_UNREACHABLE_IMPL() __assume(false) +#else +#define ABSL_INTERNAL_UNREACHABLE_IMPL() +#endif + +// `ABSL_UNREACHABLE()` is an unreachable statement. A program which reaches +// one has undefined behavior, and the compiler may optimize accordingly. +#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG) +// Abort in hardened mode to avoid dangerous undefined behavior. +#define ABSL_UNREACHABLE() \ + do { \ + ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL(); \ + ABSL_INTERNAL_UNREACHABLE_IMPL(); \ + } while (false) +#else +// The assert only fires in debug mode to aid in debugging. +// When NDEBUG is defined, reaching ABSL_UNREACHABLE() is undefined behavior. +#define ABSL_UNREACHABLE() \ + do { \ + /* NOLINTNEXTLINE: misc-static-assert */ \ + assert(false && "ABSL_UNREACHABLE reached"); \ + ABSL_INTERNAL_UNREACHABLE_IMPL(); \ + } while (false) +#endif + +// ABSL_ASSUME(cond) +// +// Informs the compiler that a condition is always true and that it can assume +// it to be true for optimization purposes. +// +// WARNING: If the condition is false, the program can produce undefined and +// potentially dangerous behavior. +// +// In !NDEBUG mode, the condition is checked with an assert(). +// +// NOTE: The expression must not have side effects, as it may only be evaluated +// in some compilation modes and not others. Some compilers may issue a warning +// if the compiler cannot prove the expression has no side effects. For example, +// the expression should not use a function call since the compiler cannot prove +// that a function call does not have side effects. +// +// Example: +// +// int x = ...; +// ABSL_ASSUME(x >= 0); +// // The compiler can optimize the division to a simple right shift using the +// // assumption specified above. 
+// int y = x / 16; +// +#if !defined(NDEBUG) +#define ABSL_ASSUME(cond) assert(cond) +#elif ABSL_HAVE_BUILTIN(__builtin_assume) +#define ABSL_ASSUME(cond) __builtin_assume(cond) +#elif defined(_MSC_VER) +#define ABSL_ASSUME(cond) __assume(cond) +#elif defined(__cpp_lib_unreachable) && __cpp_lib_unreachable >= 202202L +#define ABSL_ASSUME(cond) \ + do { \ + if (!(cond)) std::unreachable(); \ + } while (false) +#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_ASSUME(cond) \ + do { \ + if (!(cond)) __builtin_unreachable(); \ + } while (false) +#else +#define ABSL_ASSUME(cond) \ + do { \ + static_cast<void>(false && (cond)); \ + } while (false) +#endif + +// ABSL_INTERNAL_UNIQUE_SMALL_NAME() +// This macro forces a small unique name on static file-level symbols such as +// static local variables or static functions. This is intended to be used in +// macro definitions to optimize the cost of generated code. Do NOT use it on +// symbols exported from a translation unit since it may cause a link time +// conflict. +// +// Example: +// +// #define MY_MACRO(txt) +// namespace { +// char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt; +// const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); +// const char* VeryVeryLongFuncName() { return txt; } +// } +// + +#if defined(__GNUC__) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) +#else +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() +#endif + +#endif // ABSL_BASE_OPTIMIZATION_H_ diff --git a/third_party/absl/absl/base/optimization_test.cc b/third_party/absl/absl/base/optimization_test.cc new file mode 100644 index 000000000..e83369f32 --- /dev/null +++ b/third_party/absl/absl/base/optimization_test.cc @@ -0,0 +1,129 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/optimization.h" + +#include "gtest/gtest.h" +#include "absl/types/optional.h" + +namespace { + +// Tests for the ABSL_PREDICT_TRUE and ABSL_PREDICT_FALSE macros. +// The tests only verify that the macros are functionally correct, i.e. code +// behaves as if they weren't used. They don't try to check their impact on +// optimization.
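
A minimal sketch of the branch annotation defined above in use (hypothetical `GetOrDie` accessor, not part of the vendored sources): the failure branch is cold, so it is marked with ABSL_PREDICT_FALSE to keep the common case on the fall-through path.

#include <cstdlib>

#include "absl/base/optimization.h"

int GetOrDie(const int* data, int size, int i) {
  if (ABSL_PREDICT_FALSE(i < 0 || i >= size)) abort();  // Cold path.
  return data[i];
}
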
+ +TEST(PredictTest, PredictTrue) { + EXPECT_TRUE(ABSL_PREDICT_TRUE(true)); + EXPECT_FALSE(ABSL_PREDICT_TRUE(false)); + EXPECT_TRUE(ABSL_PREDICT_TRUE(1 == 1)); + EXPECT_FALSE(ABSL_PREDICT_TRUE(1 == 2)); + + if (ABSL_PREDICT_TRUE(false)) ADD_FAILURE(); + if (!ABSL_PREDICT_TRUE(true)) ADD_FAILURE(); + + EXPECT_TRUE(ABSL_PREDICT_TRUE(true) && true); + EXPECT_TRUE(ABSL_PREDICT_TRUE(true) || false); +} + +TEST(PredictTest, PredictFalse) { + EXPECT_TRUE(ABSL_PREDICT_FALSE(true)); + EXPECT_FALSE(ABSL_PREDICT_FALSE(false)); + EXPECT_TRUE(ABSL_PREDICT_FALSE(1 == 1)); + EXPECT_FALSE(ABSL_PREDICT_FALSE(1 == 2)); + + if (ABSL_PREDICT_FALSE(false)) ADD_FAILURE(); + if (!ABSL_PREDICT_FALSE(true)) ADD_FAILURE(); + + EXPECT_TRUE(ABSL_PREDICT_FALSE(true) && true); + EXPECT_TRUE(ABSL_PREDICT_FALSE(true) || false); +} + +TEST(PredictTest, OneEvaluation) { + // Verify that the expression is only evaluated once. + int x = 0; + if (ABSL_PREDICT_TRUE((++x) == 0)) ADD_FAILURE(); + EXPECT_EQ(x, 1); + if (ABSL_PREDICT_FALSE((++x) == 0)) ADD_FAILURE(); + EXPECT_EQ(x, 2); +} + +TEST(PredictTest, OperatorOrder) { + // Verify that operator order inside and outside the macro behaves well. + // These would fail for a naive '#define ABSL_PREDICT_TRUE(x) x' + EXPECT_TRUE(ABSL_PREDICT_TRUE(1 && 2) == true); + EXPECT_TRUE(ABSL_PREDICT_FALSE(1 && 2) == true); + EXPECT_TRUE(!ABSL_PREDICT_TRUE(1 == 2)); + EXPECT_TRUE(!ABSL_PREDICT_FALSE(1 == 2)); +} + +TEST(PredictTest, Pointer) { + const int x = 3; + const int *good_intptr = &x; + const int *null_intptr = nullptr; + EXPECT_TRUE(ABSL_PREDICT_TRUE(good_intptr)); + EXPECT_FALSE(ABSL_PREDICT_TRUE(null_intptr)); + EXPECT_TRUE(ABSL_PREDICT_FALSE(good_intptr)); + EXPECT_FALSE(ABSL_PREDICT_FALSE(null_intptr)); +} + +TEST(PredictTest, Optional) { + // Note: An optional's truth value is the value's existence, not its truth. + absl::optional<bool> has_value(false); + absl::optional<bool> no_value; + EXPECT_TRUE(ABSL_PREDICT_TRUE(has_value)); + EXPECT_FALSE(ABSL_PREDICT_TRUE(no_value)); + EXPECT_TRUE(ABSL_PREDICT_FALSE(has_value)); + EXPECT_FALSE(ABSL_PREDICT_FALSE(no_value)); +} + +class ImplicitlyConvertibleToBool { + public: + explicit ImplicitlyConvertibleToBool(bool value) : value_(value) {} + operator bool() const { // NOLINT(google-explicit-constructor) + return value_; + } + + private: + bool value_; +}; + +TEST(PredictTest, ImplicitBoolConversion) { + const ImplicitlyConvertibleToBool is_true(true); + const ImplicitlyConvertibleToBool is_false(false); + if (!ABSL_PREDICT_TRUE(is_true)) ADD_FAILURE(); + if (ABSL_PREDICT_TRUE(is_false)) ADD_FAILURE(); + if (!ABSL_PREDICT_FALSE(is_true)) ADD_FAILURE(); + if (ABSL_PREDICT_FALSE(is_false)) ADD_FAILURE(); +} + +class ExplicitlyConvertibleToBool { + public: + explicit ExplicitlyConvertibleToBool(bool value) : value_(value) {} + explicit operator bool() const { return value_; } + + private: + bool value_; +}; + +TEST(PredictTest, ExplicitBoolConversion) { + const ExplicitlyConvertibleToBool is_true(true); + const ExplicitlyConvertibleToBool is_false(false); + if (!ABSL_PREDICT_TRUE(is_true)) ADD_FAILURE(); + if (ABSL_PREDICT_TRUE(is_false)) ADD_FAILURE(); + if (!ABSL_PREDICT_FALSE(is_true)) ADD_FAILURE(); + if (ABSL_PREDICT_FALSE(is_false)) ADD_FAILURE(); +} + +} // namespace diff --git a/third_party/absl/absl/base/options.h b/third_party/absl/absl/base/options.h new file mode 100644 index 000000000..d7bf8cffe --- /dev/null +++ b/third_party/absl/absl/base/options.h @@ -0,0 +1,258 @@ +// Copyright 2019 The Abseil Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: options.h +// ----------------------------------------------------------------------------- +// +// This file contains Abseil configuration options for setting specific +// implementations instead of letting Abseil determine which implementation to +// use at compile-time. Setting these options may be useful for package or build +// managers who wish to guarantee ABI stability within binary builds (which are +// otherwise difficult to enforce). +// +// *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that +// maintainers of package managers who wish to package Abseil read and +// understand this file! *** +// +// Abseil contains a number of possible configuration endpoints, based on +// parameters such as the detected platform, language version, or command-line +// flags used to invoke the underlying binary. As is the case with all +// libraries, binaries which contain Abseil code must ensure that separate +// packages use the same compiled copy of Abseil to avoid a diamond dependency +// problem, which can occur if two packages built with different Abseil +// configuration settings are linked together. Diamond dependency problems in +// C++ may manifest as violations of the One Definition Rule (ODR) (resulting in +// linker errors) or undefined behavior (resulting in crashes). +// +// Diamond dependency problems can be avoided if all packages utilize the same +// exact version of Abseil. Building from source code with the same compilation +// parameters is the easiest way to avoid such dependency problems. However, for +// package managers who cannot control such compilation parameters, we are +// providing this file to allow you to inject ABI (Application Binary Interface) +// stability across builds. Setting options in this file will neither change +// API nor ABI, providing a stable copy of Abseil between packages. +// +// Care must be taken to keep options within these configurations isolated +// from any other dynamic settings, such as command-line flags which could alter +// these options. This file is provided specifically to help build and package +// managers provide a stable copy of Abseil within their libraries and binaries; +// other developers should not need to alter the contents of this file. +// +// ----------------------------------------------------------------------------- +// Usage +// ----------------------------------------------------------------------------- +// +// For any particular package release, set the appropriate definitions within +// this file to whatever value makes the most sense for your package(s). Note +// that most of these options currently affect the implementation of types; +// future options may affect other implementation details.
+// +// NOTE: the defaults within this file all assume that Abseil can select the +// proper Abseil implementation at compile-time, which will not be sufficient +// to guarantee ABI stability to package managers. + +#ifndef ABSL_BASE_OPTIONS_H_ +#define ABSL_BASE_OPTIONS_H_ + +// ----------------------------------------------------------------------------- +// Type Compatibility Options +// ----------------------------------------------------------------------------- +// +// ABSL_OPTION_USE_STD_ANY +// +// This option controls whether absl::any is implemented as an alias to +// std::any, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::any. This requires that all code +// using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::any is available. This option is +// useful when you are building your entire program, including all of its +// dependencies, from source. It should not be used otherwise -- for example, +// if you are distributing Abseil in a binary package manager -- since in +// mode 2, absl::any will name a different type, with a different mangled name +// and binary layout, depending on the compiler flags passed by the end user. +// For more info, see https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY. + +#define ABSL_OPTION_USE_STD_ANY 2 + + +// ABSL_OPTION_USE_STD_OPTIONAL +// +// This option controls whether absl::optional is implemented as an alias to +// std::optional, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::optional. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::optional is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::optional will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. + +// User code should not inspect this macro. To check in the preprocessor if +// absl::optional is a typedef of std::optional, use the feature macro +// ABSL_USES_STD_OPTIONAL. + +#define ABSL_OPTION_USE_STD_OPTIONAL 2 + + +// ABSL_OPTION_USE_STD_STRING_VIEW +// +// This option controls whether absl::string_view is implemented as an alias to +// std::string_view, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::string_view. This requires that +// all code using Abseil is built in C++17 mode or later. 
+// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::string_view is available. This +// option is useful when you are building your program from source. It should +// not be used otherwise -- for example, if you are distributing Abseil in a +// binary package manager -- since in mode 2, absl::string_view will name a +// different type, with a different mangled name and binary layout, depending on +// the compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::string_view is a typedef of std::string_view, use the feature macro +// ABSL_USES_STD_STRING_VIEW. + +#define ABSL_OPTION_USE_STD_STRING_VIEW 2 + +// ABSL_OPTION_USE_STD_VARIANT +// +// This option controls whether absl::variant is implemented as an alias to +// std::variant, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::variant. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::variant is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::variant will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::variant is a typedef of std::variant, use the feature macro +// ABSL_USES_STD_VARIANT. + +#define ABSL_OPTION_USE_STD_VARIANT 2 + +// ABSL_OPTION_USE_STD_ORDERING +// +// This option controls whether absl::{partial,weak,strong}_ordering are +// implemented as aliases to the std:: ordering types, or as an independent +// implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use aliases. This requires that all code using Abseil +// is built in C++20 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if working std:: ordering types are available. This +// option is useful when you are building your program from source. It should +// not be used otherwise -- for example, if you are distributing Abseil in a +// binary package manager -- since in mode 2, they will name different types, +// with different mangled names and binary layout, depending on the compiler +// flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// the ordering types are aliases of std:: ordering types, use the feature macro +// ABSL_USES_STD_ORDERING. 
+ +#define ABSL_OPTION_USE_STD_ORDERING 2 + +// ABSL_OPTION_USE_INLINE_NAMESPACE +// ABSL_OPTION_INLINE_NAMESPACE_NAME +// +// These options control whether all entities in the absl namespace are +// contained within an inner inline namespace. This does not affect the +// user-visible API of Abseil, but it changes the mangled names of all symbols. +// +// This can be useful as a version tag if you are distributing Abseil in +// precompiled form. This will prevent a binary library build of Abseil with +// one inline namespace being used with headers configured with a different +// inline namespace name. Binary packagers are reminded that Abseil does not +// guarantee any ABI stability, so any update of Abseil or +// configuration change in such a binary package should be combined with a +// new, unique value for the inline namespace name. +// +// A value of 0 means not to use inline namespaces. +// +// A value of 1 means to use an inline namespace with the given name inside +// namespace absl. If this is set, ABSL_OPTION_INLINE_NAMESPACE_NAME must also +// be changed to a new, unique identifier name. In particular "head" is not +// allowed. + +#define ABSL_OPTION_USE_INLINE_NAMESPACE 1 +#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20240116 + +// ABSL_OPTION_HARDENED +// +// This option enables a "hardened" build in release mode (in this context, +// release mode is defined as a build where the `NDEBUG` macro is defined). +// +// A value of 0 means that "hardened" mode is not enabled. +// +// A value of 1 means that "hardened" mode is enabled. +// +// Hardened builds have additional security checks enabled when `NDEBUG` is +// defined. Defining `NDEBUG` is normally used to turn the `assert()` macro +// into a no-op, as well as to disable other bespoke program consistency +// checks. By defining ABSL_OPTION_HARDENED to 1, a select set of checks remain +// enabled in release mode. These checks guard against programming errors that +// may lead to security vulnerabilities. In release mode, when one of these +// programming errors is encountered, the program will immediately abort, +// possibly without any attempt at logging. +// +// The checks enabled by this option are not free; they do incur runtime cost. +// +// The checks enabled by this option are always active when `NDEBUG` is not +// defined, even in the case when ABSL_OPTION_HARDENED is defined to 0. The +// checks enabled by this option may abort the program in a different way and +// log additional information when `NDEBUG` is not defined. + +#define ABSL_OPTION_HARDENED 0 + +#endif // ABSL_BASE_OPTIONS_H_ diff --git a/third_party/absl/absl/base/policy_checks.h b/third_party/absl/absl/base/policy_checks.h new file mode 100644 index 000000000..372e848d8 --- /dev/null +++ b/third_party/absl/absl/base/policy_checks.h @@ -0,0 +1,113 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
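
A minimal sketch of the feature-macro pattern prescribed above (hypothetical `ToStd` helper): user code branches on ABSL_USES_STD_OPTIONAL rather than inspecting the ABSL_OPTION_* macros directly.

#include "absl/types/optional.h"

#ifdef ABSL_USES_STD_OPTIONAL
#include <optional>
// In this configuration absl::optional<int> and std::optional<int> are the
// same type, so the conversion below is a no-op.
std::optional<int> ToStd(absl::optional<int> v) { return v; }
#endif
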
+// +// ----------------------------------------------------------------------------- +// File: policy_checks.h +// ----------------------------------------------------------------------------- +// +// This header enforces a minimum set of policies at build time, such as the +// supported compiler and library versions. Unsupported configurations are +// reported with `#error`. This enforcement is best effort, so successfully +// compiling this header does not guarantee a supported configuration. + +#ifndef ABSL_BASE_POLICY_CHECKS_H_ +#define ABSL_BASE_POLICY_CHECKS_H_ + +// Included for the __GLIBC_PREREQ macro used below. +#include <limits.h> + +// Included for the _STLPORT_VERSION macro used below. +#if defined(__cplusplus) +#include <cstddef> +#endif + +// ----------------------------------------------------------------------------- +// Operating System Check +// ----------------------------------------------------------------------------- + +#if defined(__CYGWIN__) +#error "Cygwin is not supported." +#endif + +// ----------------------------------------------------------------------------- +// Toolchain Check +// ----------------------------------------------------------------------------- + +// We support Visual Studio 2019 (MSVC++ 16.0) and later. +// This minimum will go up. +#if defined(_MSC_VER) && _MSC_VER < 1920 && !defined(__clang__) +#error "This package requires Visual Studio 2019 (MSVC++ 16.0) or higher." +#endif + +// We support GCC 7 and later. +// This minimum will go up. +#if defined(__GNUC__) && !defined(__clang__) +#if __GNUC__ < 7 +#error "This package requires GCC 7 or higher." +#endif +#endif + +// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later. +// This corresponds to Apple Xcode version 4.5. +// This minimum will go up. +#if defined(__apple_build_version__) && __apple_build_version__ < 4211165 +#error "This package requires __apple_build_version__ of 4211165 or higher." +#endif + +// ----------------------------------------------------------------------------- +// C++ Version Check +// ----------------------------------------------------------------------------- + +// Enforce C++14 as the minimum. +#if defined(_MSVC_LANG) +#if _MSVC_LANG < 201402L +#error "C++ versions less than C++14 are not supported." +#endif // _MSVC_LANG < 201402L +#elif defined(__cplusplus) +#if __cplusplus < 201402L +#error "C++ versions less than C++14 are not supported." +#endif // __cplusplus < 201402L +#endif + +// ----------------------------------------------------------------------------- +// Standard Library Check +// ----------------------------------------------------------------------------- + +#if defined(_STLPORT_VERSION) +#error "STLPort is not supported." +#endif + +// ----------------------------------------------------------------------------- +// `char` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a +// platform where this is not the case, please provide us with the details about +// your platform so we can consider relaxing this requirement. +#if CHAR_BIT != 8 +#error "Abseil assumes CHAR_BIT == 8." +#endif + +// ----------------------------------------------------------------------------- +// `int` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes that an int is 4 bytes.
If you would like to use +// Abseil on a platform where this is not the case, please provide us with the +// details about your platform so we can consider relaxing this requirement. +#if INT_MAX < 2147483647 +#error "Abseil assumes that int is at least 4 bytes." +#endif + +#endif // ABSL_BASE_POLICY_CHECKS_H_ diff --git a/third_party/absl/absl/base/port.h b/third_party/absl/absl/base/port.h new file mode 100644 index 000000000..5bc4d6cd9 --- /dev/null +++ b/third_party/absl/absl/base/port.h @@ -0,0 +1,25 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a forwarding header for other headers containing various +// portability macros and functions. + +#ifndef ABSL_BASE_PORT_H_ +#define ABSL_BASE_PORT_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +#endif // ABSL_BASE_PORT_H_ diff --git a/third_party/absl/absl/base/prefetch.h b/third_party/absl/absl/base/prefetch.h new file mode 100644 index 000000000..eb40a4453 --- /dev/null +++ b/third_party/absl/absl/base/prefetch.h @@ -0,0 +1,209 @@ +// Copyright 2023 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: prefetch.h +// ----------------------------------------------------------------------------- +// +// This header file defines prefetch functions to prefetch memory contents +// into the first level cache (L1) for the current CPU. The prefetch logic +// offered in this header is limited to prefetching first level cachelines +// only, and is aimed at relatively 'simple' prefetching logic. +// +#ifndef ABSL_BASE_PREFETCH_H_ +#define ABSL_BASE_PREFETCH_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +#if defined(ABSL_INTERNAL_HAVE_SSE) +#include <xmmintrin.h> +#endif + +#if defined(_MSC_VER) +#include <intrin.h> +#if defined(ABSL_INTERNAL_HAVE_SSE) +#pragma intrinsic(_mm_prefetch) +#endif +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Moves data into the L1 cache before it is read, or "prefetches" it. +// +// The value of `addr` is the address of the memory to prefetch. If +// the target and compiler support it, data prefetch instructions are +// generated. If the prefetch is done some time before the memory is +// read, it may be in the cache by the time the read occurs.
+// +// This method prefetches data with the highest degree of temporal locality; +// data is prefetched where possible into all levels of the cache. +// +// Incorrect or gratuitous use of this function can degrade performance. +// Use this function only when representative benchmarks show an improvement. +// +// Example: +// +// // Computes incremental checksum for `data`. +// int ComputeChecksum(int sum, absl::string_view data); +// +// // Computes cumulative checksum for all values in `data` +// int ComputeChecksum(absl::Span<const absl::string_view> data) { +// int sum = 0; +// auto it = data.begin(); +// auto pit = data.begin(); +// auto end = data.end(); +// for (int dist = 8; dist > 0 && pit != data.end(); --dist, ++pit) { +// absl::PrefetchToLocalCache(pit->data()); +// } +// for (; pit != end; ++pit, ++it) { +// sum = ComputeChecksum(sum, *it); +// absl::PrefetchToLocalCache(pit->data()); +// } +// for (; it != end; ++it) { +// sum = ComputeChecksum(sum, *it); +// } +// return sum; +// } +// +void PrefetchToLocalCache(const void* addr); + +// Moves data into the L1 cache before it is read, or "prefetches" it. +// +// This function is identical to `PrefetchToLocalCache()` except that it has +// non-temporal locality: the fetched data should not be left in any of the +// cache tiers. This is useful for cases where the data is used only once / +// short term, for example, invoking a destructor on an object. +// +// Incorrect or gratuitous use of this function can degrade performance. +// Use this function only when representative benchmarks show an improvement. +// +// Example: +// +// template <typename Iterator> +// void DestroyPointers(Iterator begin, Iterator end) { +// int dist = 8; +// auto prefetch_it = begin; +// while (prefetch_it != end && --dist) { +// absl::PrefetchToLocalCacheNta(*prefetch_it++); +// } +// while (prefetch_it != end) { +// delete *begin++; +// absl::PrefetchToLocalCacheNta(*prefetch_it++); +// } +// while (begin != end) { +// delete *begin++; +// } +// } +// +void PrefetchToLocalCacheNta(const void* addr); + +// Moves data into the L1 cache with the intent to modify it. +// +// This function is similar to `PrefetchToLocalCache()` except that it +// prefetches cachelines with an 'intent to modify'. This typically includes +// invalidating cache entries for this address in all other cache tiers, and an +// exclusive access intent. +// +// Incorrect or gratuitous use of this function can degrade performance. As this +// function can invalidate cachelines in the caches of other cores, +// incorrect usage of this function can have an even greater negative impact +// than incorrect regular prefetches. +// Use this function only when representative benchmarks show an improvement. +// +// Example: +// +// void* Arena::Allocate(size_t size) { +// void* ptr = AllocateBlock(size); +// absl::PrefetchToLocalCacheForWrite(ptr); +// return ptr; +// } +// +void PrefetchToLocalCacheForWrite(const void* addr); + +#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + +#define ABSL_HAVE_PREFETCH 1 + +// See __builtin_prefetch: +// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+// +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCache( + const void* addr) { + __builtin_prefetch(addr, 0, 3); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheNta( + const void* addr) { + __builtin_prefetch(addr, 0, 0); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheForWrite( + const void* addr) { + // [x86] gcc/clang don't generate PREFETCHW for __builtin_prefetch(.., 1) + // unless -march=broadwell or newer; this is not generally the default, so we + // manually emit prefetchw. PREFETCHW is recognized as a no-op on older Intel + // processors and has been present on AMD processors since the K6-2. +#if defined(__x86_64__) && !defined(__PRFCHW__) + asm("prefetchw %0" : : "m"(*reinterpret_cast<const char*>(addr))); +#else + __builtin_prefetch(addr, 1, 3); +#endif +} + +#elif defined(ABSL_INTERNAL_HAVE_SSE) + +#define ABSL_HAVE_PREFETCH 1 + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCache( + const void* addr) { + _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheNta( + const void* addr) { + _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheForWrite( + const void* addr) { +#if defined(_MM_HINT_ET0) + _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_ET0); +#elif !defined(_MSC_VER) && defined(__x86_64__) + // _MM_HINT_ET0 is not universally supported. As we commented further + // up, PREFETCHW is recognized as a no-op on older Intel processors + // and has been present on AMD processors since the K6-2. We have this + // disabled for MSVC compilers as this miscompiles on older MSVC compilers. + asm("prefetchw %0" : : "m"(*reinterpret_cast<const char*>(addr))); +#endif +} + +#else + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCache( + const void* addr) {} +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheNta( + const void* addr) {} +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void PrefetchToLocalCacheForWrite( + const void* addr) {} + +#endif + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_PREFETCH_H_ diff --git a/third_party/absl/absl/base/prefetch_test.cc b/third_party/absl/absl/base/prefetch_test.cc new file mode 100644 index 000000000..ee219897c --- /dev/null +++ b/third_party/absl/absl/base/prefetch_test.cc @@ -0,0 +1,64 @@ +// Copyright 2023 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/base/prefetch.h" + +#include <cstring> +#include <memory> + +#include "gtest/gtest.h" + +namespace { + +// Below tests exercise the functions only to guarantee they compile and execute +// correctly. We make no attempt at verifying any prefetch instructions being +// generated and executed: we assume the various implementations in terms of +// __builtin_prefetch() or x86 intrinsics to be correct and well tested.
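
A minimal sketch of the prefetch-distance pattern described in the header comments above (hypothetical `Sum` function; the distance of 8 elements is illustrative, not tuned):

#include <cstddef>

#include "absl/base/prefetch.h"

// Sums an array while prefetching a fixed distance ahead of the cursor.
long Sum(const int* v, size_t n) {
  long sum = 0;
  for (size_t i = 0; i < n; ++i) {
    if (i + 8 < n) absl::PrefetchToLocalCache(v + i + 8);
    sum += v[i];
  }
  return sum;
}
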
+ +TEST(PrefetchTest, PrefetchToLocalCache_StackA) { + char buf[100] = {}; + absl::PrefetchToLocalCache(buf); + absl::PrefetchToLocalCacheNta(buf); + absl::PrefetchToLocalCacheForWrite(buf); +} + +TEST(PrefetchTest, PrefetchToLocalCache_Heap) { + auto memory = std::make_unique<char[]>(200 << 10); + memset(memory.get(), 0, 200 << 10); + absl::PrefetchToLocalCache(memory.get()); + absl::PrefetchToLocalCacheNta(memory.get()); + absl::PrefetchToLocalCacheForWrite(memory.get()); + absl::PrefetchToLocalCache(memory.get() + (50 << 10)); + absl::PrefetchToLocalCacheNta(memory.get() + (50 << 10)); + absl::PrefetchToLocalCacheForWrite(memory.get() + (50 << 10)); + absl::PrefetchToLocalCache(memory.get() + (100 << 10)); + absl::PrefetchToLocalCacheNta(memory.get() + (100 << 10)); + absl::PrefetchToLocalCacheForWrite(memory.get() + (100 << 10)); + absl::PrefetchToLocalCache(memory.get() + (150 << 10)); + absl::PrefetchToLocalCacheNta(memory.get() + (150 << 10)); + absl::PrefetchToLocalCacheForWrite(memory.get() + (150 << 10)); +} + +TEST(PrefetchTest, PrefetchToLocalCache_Nullptr) { + absl::PrefetchToLocalCache(nullptr); + absl::PrefetchToLocalCacheNta(nullptr); + absl::PrefetchToLocalCacheForWrite(nullptr); +} + +TEST(PrefetchTest, PrefetchToLocalCache_InvalidPtr) { + absl::PrefetchToLocalCache(reinterpret_cast<const void*>(0x785326532L)); + absl::PrefetchToLocalCacheNta(reinterpret_cast<const void*>(0x785326532L)); + absl::PrefetchToLocalCacheForWrite(reinterpret_cast<const void*>(0x78532L)); +} + +} // namespace diff --git a/third_party/absl/absl/base/raw_logging_test.cc b/third_party/absl/absl/base/raw_logging_test.cc new file mode 100644 index 000000000..3d30bd386 --- /dev/null +++ b/third_party/absl/absl/base/raw_logging_test.cc @@ -0,0 +1,79 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This test serves primarily as a compilation test for base/raw_logging.h. +// Raw logging testing is covered by logging_unittest.cc, which is not as +// portable as this test. + +#include "absl/base/internal/raw_logging.h" + +#include <string> + +#include "gtest/gtest.h" +#include "absl/strings/str_cat.h" + +namespace { + +TEST(RawLoggingCompilationTest, Log) { + ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4); + ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5); + ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1); + ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1); +} + +TEST(RawLoggingCompilationTest, PassingCheck) { + ABSL_RAW_CHECK(true, "RAW CHECK"); +} + +// Not all platforms support output from raw log, so we don't verify any +// particular output for RAW check failures (expecting the empty string +// accomplishes this). This test is primarily a compilation test, but we +// are verifying process death when EXPECT_DEATH works for a platform.
+const char kExpectedDeathOutput[] = ""; + +TEST(RawLoggingDeathTest, FailingCheck) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"), + kExpectedDeathOutput); +} + +TEST(RawLoggingDeathTest, LogFatal) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"), + kExpectedDeathOutput); +} + +TEST(InternalLog, CompilationTest) { + ABSL_INTERNAL_LOG(INFO, "Internal Log"); + std::string log_msg = "Internal Log"; + ABSL_INTERNAL_LOG(INFO, log_msg); + + ABSL_INTERNAL_LOG(INFO, log_msg + " 2"); + + float d = 1.1f; + ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d)); +} + +TEST(InternalLogDeathTest, FailingCheck) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"), + kExpectedDeathOutput); +} + +TEST(InternalLogDeathTest, LogFatal) { + EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"), + kExpectedDeathOutput); +} + +} // namespace diff --git a/third_party/absl/absl/base/spinlock_test_common.cc b/third_party/absl/absl/base/spinlock_test_common.cc new file mode 100644 index 000000000..e9047158c --- /dev/null +++ b/third_party/absl/absl/base/spinlock_test_common.cc @@ -0,0 +1,285 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A bunch of threads repeatedly hash an array of ints protected by a +// spinlock. If the spinlock is working properly, all elements of the +// array should be equal at the end of the test. + +#include <stdint.h> + +#include <limits> +#include <random> +#include <thread> // NOLINT(build/c++11) +#include <type_traits> +#include <vector> + +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/spinlock.h" +#include "absl/base/internal/sysinfo.h" +#include "absl/base/macros.h" +#include "absl/synchronization/blocking_counter.h" +#include "absl/synchronization/notification.h" + +constexpr uint32_t kNumThreads = 10; +constexpr int32_t kIters = 1000; + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// This is defined outside of anonymous namespace so that it can be +// a friend of SpinLock to access protected methods for testing.
+struct SpinLockTest { + static uint32_t EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time) { + return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time); + } + static int64_t DecodeWaitCycles(uint32_t lock_value) { + return SpinLock::DecodeWaitCycles(lock_value); + } + + static bool IsCooperative(const SpinLock& l) { return l.IsCooperative(); } +}; + +namespace { + +static constexpr size_t kArrayLength = 10; +static uint32_t values[kArrayLength]; + +ABSL_CONST_INIT static SpinLock static_cooperative_spinlock( + absl::kConstInit, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); +ABSL_CONST_INIT static SpinLock static_noncooperative_spinlock( + absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); + +// Simple integer hash function based on the public domain lookup2 hash. +// http://burtleburtle.net/bob/c/lookup2.c +static uint32_t Hash32(uint32_t a, uint32_t c) { + uint32_t b = 0x9e3779b9UL; // The golden ratio; an arbitrary value. + a -= b; a -= c; a ^= (c >> 13); + b -= c; b -= a; b ^= (a << 8); + c -= a; c -= b; c ^= (b >> 13); + a -= b; a -= c; a ^= (c >> 12); + b -= c; b -= a; b ^= (a << 16); + c -= a; c -= b; c ^= (b >> 5); + a -= b; a -= c; a ^= (c >> 3); + b -= c; b -= a; b ^= (a << 10); + c -= a; c -= b; c ^= (b >> 15); + return c; +} + +static void TestFunction(uint32_t thread_salt, SpinLock* spinlock) { + for (int i = 0; i < kIters; i++) { + SpinLockHolder h(spinlock); + for (size_t j = 0; j < kArrayLength; j++) { + const size_t index = (j + thread_salt) % kArrayLength; + values[index] = Hash32(values[index], thread_salt); + std::this_thread::yield(); + } + } +} + +static void ThreadedTest(SpinLock* spinlock) { + std::vector<std::thread> threads; + threads.reserve(kNumThreads); + for (uint32_t i = 0; i < kNumThreads; ++i) { + threads.push_back(std::thread(TestFunction, i, spinlock)); + } + for (auto& thread : threads) { + thread.join(); + } + + SpinLockHolder h(spinlock); + for (size_t i = 1; i < kArrayLength; i++) { + EXPECT_EQ(values[0], values[i]); + } +} + +#ifndef ABSL_HAVE_THREAD_SANITIZER +static_assert(std::is_trivially_destructible<SpinLock>(), ""); +#endif + +TEST(SpinLock, StackNonCooperativeDisablesScheduling) { + SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); + spinlock.Lock(); + EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); + spinlock.Unlock(); +} + +TEST(SpinLock, StaticNonCooperativeDisablesScheduling) { + static_noncooperative_spinlock.Lock(); + EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); + static_noncooperative_spinlock.Unlock(); +} + +TEST(SpinLock, WaitCyclesEncoding) { + // These are implementation details not exported by SpinLock. + const int kProfileTimestampShift = 7; + const int kLockwordReservedShift = 3; + const uint32_t kSpinLockSleeper = 8; + + // We should be able to encode up to (2^kMaxCyclesShift - 1) without clamping + // but the lower kProfileTimestampShift will be dropped. + const int kMaxCyclesShift = + 32 - kLockwordReservedShift + kProfileTimestampShift; + const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1; + + // These bits should be zero after encoding. + const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1; + + // These bits are dropped when wait cycles are encoded. + const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1; + + // Test a bunch of random values. + std::default_random_engine generator; + // Shift to avoid overflow below.
+ std::uniform_int_distribution<int64_t> time_distribution( + 0, std::numeric_limits<int64_t>::max() >> 3); + std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles); + + for (int i = 0; i < 100; i++) { + int64_t start_time = time_distribution(generator); + int64_t cycles = cycle_distribution(generator); + int64_t end_time = start_time + cycles; + uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time); + EXPECT_EQ(0u, lock_value & kLockwordReservedMask); + int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value); + EXPECT_EQ(0, decoded & kProfileTimestampMask); + EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded); + } + + // Test corner cases + int64_t start_time = time_distribution(generator); + EXPECT_EQ(kSpinLockSleeper, + SpinLockTest::EncodeWaitCycles(start_time, start_time)); + EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0)); + EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask)); + EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask, + SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask)); + + // Check that we cannot produce kSpinLockSleeper during encoding. + int64_t sleeper_cycles = + kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift); + uint32_t sleeper_value = + SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles); + EXPECT_NE(sleeper_value, kSpinLockSleeper); + + // Test clamping + uint32_t max_value = + SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles); + int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value); + int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask; + EXPECT_EQ(expected_max_value_decoded, max_value_decoded); + + const int64_t step = (1 << kProfileTimestampShift); + uint32_t after_max_value = + SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step); + int64_t after_max_value_decoded = + SpinLockTest::DecodeWaitCycles(after_max_value); + EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded); + + uint32_t before_max_value = SpinLockTest::EncodeWaitCycles( + start_time, start_time + kMaxCycles - step); + int64_t before_max_value_decoded = + SpinLockTest::DecodeWaitCycles(before_max_value); + EXPECT_GT(expected_max_value_decoded, before_max_value_decoded); +} + +TEST(SpinLockWithThreads, StackSpinLock) { + SpinLock spinlock; + ThreadedTest(&spinlock); +} + +TEST(SpinLockWithThreads, StackCooperativeSpinLock) { + SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); + ThreadedTest(&spinlock); +} + +TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) { + SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); + ThreadedTest(&spinlock); +} + +TEST(SpinLockWithThreads, StaticCooperativeSpinLock) { + ThreadedTest(&static_cooperative_spinlock); +} + +TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) { + ThreadedTest(&static_noncooperative_spinlock); +} + +TEST(SpinLockWithThreads, DoesNotDeadlock) { + struct Helper { + static void NotifyThenLock(Notification* locked, SpinLock* spinlock, + BlockingCounter* b) { + locked->WaitForNotification(); // Wait for LockThenWait() to hold "spinlock".
+      b->DecrementCount();
+      SpinLockHolder l(spinlock);
+    }
+
+    static void LockThenWait(Notification* locked, SpinLock* spinlock,
+                             BlockingCounter* b) {
+      SpinLockHolder l(spinlock);
+      locked->Notify();
+      b->Wait();
+    }
+
+    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
+      Notification locked;
+      BlockingCounter counter(num_spinners);
+      std::vector<std::thread> threads;
+
+      threads.push_back(
+          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
+      for (int i = 0; i < num_spinners; ++i) {
+        threads.push_back(
+            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
+      }
+
+      for (auto& thread : threads) {
+        thread.join();
+      }
+    }
+  };
+
+  SpinLock stack_cooperative_spinlock(
+      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
+  Helper::DeadlockTest(&stack_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&stack_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_cooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+  Helper::DeadlockTest(&static_noncooperative_spinlock,
+                       base_internal::NumCPUs() * 2);
+}
+
+TEST(SpinLockTest, IsCooperative) {
+  SpinLock default_constructor;
+  EXPECT_TRUE(SpinLockTest::IsCooperative(default_constructor));
+
+  SpinLock cooperative(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
+  EXPECT_TRUE(SpinLockTest::IsCooperative(cooperative));
+
+  SpinLock kernel_only(base_internal::SCHEDULE_KERNEL_ONLY);
+  EXPECT_FALSE(SpinLockTest::IsCooperative(kernel_only));
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/base/thread_annotations.h b/third_party/absl/absl/base/thread_annotations.h
new file mode 100644
index 000000000..4a3f3e33e
--- /dev/null
+++ b/third_party/absl/absl/base/thread_annotations.h
@@ -0,0 +1,333 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the
+// macros defined here instead of raw attributes allows for portability and
+// future compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
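+//
+// As a hedged illustration (this example is editorial, not part of the
+// upstream header; `BankAccount`, `mu_`, and `balance_` are hypothetical
+// names), the annotations below combine to describe one class's complete
+// locking policy:
+//
+//   class BankAccount {
+//    public:
+//     // Acquires `mu_` itself, so callers must not already hold it.
+//     void Deposit(int amount) ABSL_LOCKS_EXCLUDED(mu_) {
+//       absl::MutexLock lock(&mu_);
+//       DepositLocked(amount);
+//     }
+//
+//    private:
+//     // Callers must hold `mu_`; the analysis checks this at call sites.
+//     void DepositLocked(int amount) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+//       balance_ += amount;
+//     }
+//
+//     absl::Mutex mu_;
+//     int balance_ ABSL_GUARDED_BY(mu_) = 0;
+//   };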
+ +#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_THREAD_ANNOTATIONS_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// ABSL_GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ ABSL_GUARDED_BY(mu_); +// ... +// }; +#if ABSL_HAVE_ATTRIBUTE(guarded_by) +#define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x))) +#else +#define ABSL_GUARDED_BY(x) +#endif + +// ABSL_PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ ABSL_PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); +#if ABSL_HAVE_ATTRIBUTE(pt_guarded_by) +#define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x))) +#else +#define ABSL_PT_GUARDED_BY(x) +#endif + +// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER +// and ABSL_ACQUIRED_BEFORE.) +// +// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); +#if ABSL_HAVE_ATTRIBUTE(acquired_after) +#define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_AFTER(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(acquired_before) +#define ABSL_ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_BEFORE(...) +#endif + +// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// ABSL_SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a ABSL_GUARDED_BY(mu1); +// int b ABSL_GUARDED_BY(mu2); +// +// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) 
\
+  __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#else
+#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...)
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(shared_locks_required)
+#define ABSL_SHARED_LOCKS_REQUIRED(...) \
+  __attribute__((shared_locks_required(__VA_ARGS__)))
+#else
+#define ABSL_SHARED_LOCKS_REQUIRED(...)
+#endif
+
+// ABSL_LOCKS_EXCLUDED()
+//
+// Documents the locks that cannot be held by callers of this function, as they
+// might be acquired by this function (Abseil's `Mutex` locks are
+// non-reentrant).
+#if ABSL_HAVE_ATTRIBUTE(locks_excluded)
+#define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
+#else
+#define ABSL_LOCKS_EXCLUDED(...)
+#endif
+
+// ABSL_LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with ABSL_LOCK_RETURNED.
+#if ABSL_HAVE_ATTRIBUTE(lock_returned)
+#define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x)))
+#else
+#define ABSL_LOCK_RETURNED(x)
+#endif
+
+// ABSL_LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#if ABSL_HAVE_ATTRIBUTE(lockable)
+#define ABSL_LOCKABLE __attribute__((lockable))
+#else
+#define ABSL_LOCKABLE
+#endif
+
+// ABSL_SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `ABSL_EXCLUSIVE_LOCK_FUNCTION()` to specify the
+// mutex that is acquired, and the destructor should use
+// `ABSL_UNLOCK_FUNCTION()` with no arguments; the analysis will assume that
+// the destructor unlocks whatever the constructor locked.
+#if ABSL_HAVE_ATTRIBUTE(scoped_lockable)
+#define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable))
+#else
+#define ABSL_SCOPED_LOCKABLE
+#endif
+
+// ABSL_EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function)
+#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \
+  __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#else
+#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...)
+#endif
+
+// ABSL_SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#if ABSL_HAVE_ATTRIBUTE(shared_lock_function)
+#define ABSL_SHARED_LOCK_FUNCTION(...) \
+  __attribute__((shared_lock_function(__VA_ARGS__)))
+#else
+#define ABSL_SHARED_LOCK_FUNCTION(...)
+#endif
+
+// ABSL_UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#if ABSL_HAVE_ATTRIBUTE(unlock_function)
+#define ABSL_UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
+#else
+#define ABSL_UNLOCK_FUNCTION(...)
+#endif
+
+// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or
+// failure (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
+#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function)
+#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  __attribute__((exclusive_trylock_function(__VA_ARGS__)))
+#else
+#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(shared_trylock_function)
+#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \
+  __attribute__((shared_trylock_function(__VA_ARGS__)))
+#else
+#define ABSL_SHARED_TRYLOCK_FUNCTION(...)
+#endif
+
+// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and
+// fail if it is not held.
+#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock)
+#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \
+  __attribute__((assert_exclusive_lock(__VA_ARGS__)))
+#else
+#define ABSL_ASSERT_EXCLUSIVE_LOCK(...)
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock)
+#define ABSL_ASSERT_SHARED_LOCK(...) \
+  __attribute__((assert_shared_lock(__VA_ARGS__)))
+#else
+#define ABSL_ASSERT_SHARED_LOCK(...)
+#endif
+
+// ABSL_NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// the locking behavior is more complicated than the analyzer can handle.
+#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis)
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS \
+  __attribute__((no_thread_safety_analysis))
+#else
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS
+#endif
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// ABSL_TS_UNCHECKED should be placed around lock expressions that are not
+// valid C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define ABSL_TS_UNCHECKED(x) ""
+
+// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++
+// syntax. It is used by automated tools to mark and disable invalid
+// expressions. The annotation should either be fixed, or changed to
+// ABSL_TS_UNCHECKED.
+#define ABSL_TS_FIXME(x) ""
+
+// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body
+// of a particular function. However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks an
+// ABSL_GUARDED_BY annotation that needs to be fixed because it is producing a
+// thread safety warning. It disables the ABSL_GUARDED_BY.
+#define ABSL_GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+// Do not use this function directly; use ABSL_TS_UNCHECKED_READ instead.
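+//
+// A hedged usage sketch (editorial, not from the upstream header; `Counter`
+// and `count_` are hypothetical names): the wrapped read compiles without a
+// thread safety warning even though `mu_` is not held, because the caller
+// knowingly tolerates a stale value.
+//
+//   class Counter {
+//    public:
+//     void Increment() {
+//       absl::MutexLock lock(&mu_);
+//       ++count_;
+//     }
+//     int ApproximateCount() const { return ABSL_TS_UNCHECKED_READ(count_); }
+//
+//    private:
+//     mutable absl::Mutex mu_;
+//     int count_ ABSL_GUARDED_BY(mu_) = 0;
+//   };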
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_THREAD_ANNOTATIONS_H_
diff --git a/third_party/absl/absl/base/throw_delegate_test.cc b/third_party/absl/absl/base/throw_delegate_test.cc
new file mode 100644
index 000000000..e74362b70
--- /dev/null
+++ b/third_party/absl/absl/base/throw_delegate_test.cc
@@ -0,0 +1,175 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "absl/base/config.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::base_internal::ThrowStdLogicError;
+using absl::base_internal::ThrowStdInvalidArgument;
+using absl::base_internal::ThrowStdDomainError;
+using absl::base_internal::ThrowStdLengthError;
+using absl::base_internal::ThrowStdOutOfRange;
+using absl::base_internal::ThrowStdRuntimeError;
+using absl::base_internal::ThrowStdRangeError;
+using absl::base_internal::ThrowStdOverflowError;
+using absl::base_internal::ThrowStdUnderflowError;
+using absl::base_internal::ThrowStdBadFunctionCall;
+using absl::base_internal::ThrowStdBadAlloc;
+
+constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
+
+template <typename E>
+void ExpectThrowChar(void (*f)(const char*)) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+#else
+  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
+#endif
+}
+
+template <typename E>
+void ExpectThrowString(void (*f)(const std::string&)) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    f(what_arg);
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+    EXPECT_STREQ(e.what(), what_arg);
+  }
+#else
+  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
+#endif
+}
+
+template <typename E>
+void ExpectThrowNoWhat(void (*f)()) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    f();
+    FAIL() << "Didn't throw";
+  } catch (const E& e) {
+  }
+#else
+  EXPECT_DEATH_IF_SUPPORTED(f(), "");
+#endif
+}
+
+TEST(ThrowDelegate, ThrowStdLogicErrorChar) {
+  ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
+}
+
+TEST(ThrowDelegate, ThrowStdInvalidArgumentChar) {
+  ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
+}
+
+TEST(ThrowDelegate, ThrowStdDomainErrorChar) {
+  ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
+}
+
+TEST(ThrowDelegate, ThrowStdLengthErrorChar) {
+  ExpectThrowChar<std::length_error>(ThrowStdLengthError);
+}
+
+TEST(ThrowDelegate, ThrowStdOutOfRangeChar) {
+  ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
+}
+
+TEST(ThrowDelegate, ThrowStdRuntimeErrorChar) {
+  ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
+}
+
+TEST(ThrowDelegate, ThrowStdRangeErrorChar) {
+  ExpectThrowChar<std::range_error>(ThrowStdRangeError);
+}
+
+TEST(ThrowDelegate, ThrowStdOverflowErrorChar) {
+  ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
+}
+
+TEST(ThrowDelegate, ThrowStdUnderflowErrorChar) {
+  ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
+}
+
+TEST(ThrowDelegate, ThrowStdLogicErrorString) {
+  ExpectThrowString<std::logic_error>(ThrowStdLogicError);
+}
+
+TEST(ThrowDelegate, ThrowStdInvalidArgumentString) {
+  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
+}
+
+TEST(ThrowDelegate, ThrowStdDomainErrorString) {
+  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
+}
+
+TEST(ThrowDelegate, ThrowStdLengthErrorString) {
+  ExpectThrowString<std::length_error>(ThrowStdLengthError);
+}
+
+TEST(ThrowDelegate, ThrowStdOutOfRangeString) {
+  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
+}
+
+TEST(ThrowDelegate, ThrowStdRuntimeErrorString) {
+  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
+}
+
+TEST(ThrowDelegate, ThrowStdRangeErrorString) {
+  ExpectThrowString<std::range_error>(ThrowStdRangeError);
+}
+
+TEST(ThrowDelegate, ThrowStdOverflowErrorString) {
+  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
+}
+
+TEST(ThrowDelegate, ThrowStdUnderflowErrorString) {
+  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
+}
+
+TEST(ThrowDelegate, ThrowStdBadFunctionCallNoWhat) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  try {
+    ThrowStdBadFunctionCall();
+    FAIL() << "Didn't throw";
+  } catch (const std::bad_function_call&) {
+  }
+#ifdef _LIBCPP_VERSION
+  catch (const std::exception&) {
+    // https://reviews.llvm.org/D92397 causes issues with the vtable for
+    // std::bad_function_call when using libc++ as a shared library.
+  }
+#endif
+#else
+  EXPECT_DEATH_IF_SUPPORTED(ThrowStdBadFunctionCall(), "");
+#endif
+}
+
+TEST(ThrowDelegate, ThrowStdBadAllocNoWhat) {
+  ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
+}
+
+}  // namespace
diff --git a/third_party/absl/absl/cleanup/BUILD.bazel b/third_party/absl/absl/cleanup/BUILD.bazel
new file mode 100644
index 000000000..984d57149
--- /dev/null
+++ b/third_party/absl/absl/cleanup/BUILD.bazel
@@ -0,0 +1,73 @@
+# Copyright 2021 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) + +licenses(["notice"]) + +cc_library( + name = "cleanup_internal", + hdrs = ["internal/cleanup.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:base_internal", + "//absl/base:core_headers", + "//absl/utility", + ], +) + +cc_library( + name = "cleanup", + hdrs = [ + "cleanup.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":cleanup_internal", + "//absl/base:config", + "//absl/base:core_headers", + ], +) + +cc_test( + name = "cleanup_test", + size = "small", + srcs = [ + "cleanup_test.cc", + ], + copts = ABSL_TEST_COPTS, + deps = [ + ":cleanup", + "//absl/base:config", + "//absl/utility", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) diff --git a/third_party/absl/absl/cleanup/CMakeLists.txt b/third_party/absl/absl/cleanup/CMakeLists.txt new file mode 100644 index 000000000..f5af40b44 --- /dev/null +++ b/third_party/absl/absl/cleanup/CMakeLists.txt @@ -0,0 +1,56 @@ +# Copyright 2021 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + cleanup_internal + HDRS + "internal/cleanup.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base_internal + absl::core_headers + absl::utility + PUBLIC +) + +absl_cc_library( + NAME + cleanup + HDRS + "cleanup.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::cleanup_internal + absl::config + absl::core_headers + PUBLIC +) + +absl_cc_test( + NAME + cleanup_test + SRCS + "cleanup_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::cleanup + absl::config + absl::utility + GTest::gmock_main +) diff --git a/third_party/absl/absl/cleanup/cleanup.h b/third_party/absl/absl/cleanup/cleanup.h new file mode 100644 index 000000000..960ccd080 --- /dev/null +++ b/third_party/absl/absl/cleanup/cleanup.h @@ -0,0 +1,140 @@ +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: cleanup.h +// ----------------------------------------------------------------------------- +// +// `absl::Cleanup` implements the scope guard idiom, invoking the contained +// callback's `operator()() &&` on scope exit. +// +// Example: +// +// ``` +// absl::Status CopyGoodData(const char* source_path, const char* sink_path) { +// FILE* source_file = fopen(source_path, "r"); +// if (source_file == nullptr) { +// return absl::NotFoundError("No source file"); // No cleanups execute +// } +// +// // C++17 style cleanup using class template argument deduction +// absl::Cleanup source_closer = [source_file] { fclose(source_file); }; +// +// FILE* sink_file = fopen(sink_path, "w"); +// if (sink_file == nullptr) { +// return absl::NotFoundError("No sink file"); // First cleanup executes +// } +// +// // C++11 style cleanup using the factory function +// auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); }); +// +// Data data; +// while (ReadData(source_file, &data)) { +// if (!data.IsGood()) { +// absl::Status result = absl::FailedPreconditionError("Read bad data"); +// return result; // Both cleanups execute +// } +// SaveData(sink_file, &data); +// } +// +// return absl::OkStatus(); // Both cleanups execute +// } +// ``` +// +// Methods: +// +// `std::move(cleanup).Cancel()` will prevent the callback from executing. +// +// `std::move(cleanup).Invoke()` will execute the callback early, before +// destruction, and prevent the callback from executing in the destructor. +// +// Usage: +// +// `absl::Cleanup` is not an interface type. It is only intended to be used +// within the body of a function. It is not a value type and instead models a +// control flow construct. Check out `defer` in Golang for something similar. 
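+//
+// A small hedged sketch of `Cancel()` (editorial, not part of the upstream
+// header; the `Transaction` type and its methods are hypothetical):
+//
+//   void Update(Transaction* txn) {
+//     txn->Begin();
+//     auto rollback = absl::MakeCleanup([txn] { txn->Rollback(); });
+//     if (!txn->Write()) {
+//       return;  // `rollback` runs and undoes the partial write.
+//     }
+//     std::move(rollback).Cancel();  // Success: the rollback must not run.
+//     txn->Commit();
+//   }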
+
+#ifndef ABSL_CLEANUP_CLEANUP_H_
+#define ABSL_CLEANUP_CLEANUP_H_
+
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/cleanup/internal/cleanup.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+template <typename Arg, typename Callback = void()>
+class ABSL_MUST_USE_RESULT Cleanup final {
+  static_assert(cleanup_internal::WasDeduced<Arg>(),
+                "Explicit template parameters are not supported.");
+
+  static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+                "Callbacks that return values are not supported.");
+
+ public:
+  Cleanup(Callback callback) : storage_(std::move(callback)) {}  // NOLINT
+
+  Cleanup(Cleanup&& other) = default;
+
+  void Cancel() && {
+    ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+    storage_.DestroyCallback();
+  }
+
+  void Invoke() && {
+    ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+    storage_.InvokeCallback();
+    storage_.DestroyCallback();
+  }
+
+  ~Cleanup() {
+    if (storage_.IsCallbackEngaged()) {
+      storage_.InvokeCallback();
+      storage_.DestroyCallback();
+    }
+  }
+
+ private:
+  cleanup_internal::Storage<Callback> storage_;
+};
+
+// `absl::Cleanup c = /* callback */;`
+//
+// C++17 type deduction API for creating an instance of `absl::Cleanup`.
+#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+template <typename Callback>
+Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
+#endif  // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+
+// `auto c = absl::MakeCleanup(/* callback */);`
+//
+// C++11 type deduction API for creating an instance of `absl::Cleanup`.
+template <typename... Args, typename Callback>
+absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback) {
+  static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(),
+                "Explicit template parameters are not supported.");
+
+  static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+                "Callbacks that return values are not supported.");
+
+  return {std::move(callback)};
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CLEANUP_CLEANUP_H_
diff --git a/third_party/absl/absl/cleanup/cleanup_test.cc b/third_party/absl/absl/cleanup/cleanup_test.cc
new file mode 100644
index 000000000..46b885899
--- /dev/null
+++ b/third_party/absl/absl/cleanup/cleanup_test.cc
@@ -0,0 +1,311 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/cleanup/cleanup.h"
+
+#include <functional>
+#include <memory>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/utility/utility.h"
+
+namespace {
+
+using Tag = absl::cleanup_internal::Tag;
+
+template <typename T1, typename T2>
+constexpr bool IsSame() {
+  return (std::is_same<T1, T2>::value);
+}
+
+struct IdentityFactory {
+  template <typename Callback>
+  static Callback AsCallback(Callback callback) {
+    return Callback(std::move(callback));
+  }
+};
+
+// `FunctorClass` is a type used for testing `absl::Cleanup`. It is intended to
+// represent users that make their own move-only callback types outside of
+// `std::function` and lambda literals.
+class FunctorClass {
+  using Callback = std::function<void()>;
+
+ public:
+  explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {}
+
+  FunctorClass(FunctorClass&& other)
+      : callback_(absl::exchange(other.callback_, Callback())) {}
+
+  FunctorClass(const FunctorClass&) = delete;
+
+  FunctorClass& operator=(const FunctorClass&) = delete;
+
+  FunctorClass& operator=(FunctorClass&&) = delete;
+
+  void operator()() const& = delete;
+
+  void operator()() && {
+    ASSERT_TRUE(callback_);
+    callback_();
+    callback_ = nullptr;
+  }
+
+ private:
+  Callback callback_;
+};
+
+struct FunctorClassFactory {
+  template <typename Callback>
+  static FunctorClass AsCallback(Callback callback) {
+    return FunctorClass(std::move(callback));
+  }
+};
+
+struct StdFunctionFactory {
+  template <typename Callback>
+  static std::function<void()> AsCallback(Callback callback) {
+    return std::function<void()>(std::move(callback));
+  }
+};
+
+using CleanupTestParams =
+    ::testing::Types<IdentityFactory, FunctorClassFactory, StdFunctionFactory>;
+template <typename>
+struct CleanupTest : public ::testing::Test {};
+TYPED_TEST_SUITE(CleanupTest, CleanupTestParams);
+
+bool fn_ptr_called = false;
+void FnPtrFunction() { fn_ptr_called = true; }
+
+TYPED_TEST(CleanupTest, FactoryProducesCorrectType) {
+  {
+    auto callback = TypeParam::AsCallback([] {});
+    auto cleanup = absl::MakeCleanup(std::move(callback));
+
+    static_assert(
+        IsSame<absl::Cleanup<Tag, decltype(callback)>, decltype(cleanup)>(),
+        "");
+  }
+
+  {
+    auto cleanup = absl::MakeCleanup(&FnPtrFunction);
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+
+  {
+    auto cleanup = absl::MakeCleanup(FnPtrFunction);
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+}
+
+#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+TYPED_TEST(CleanupTest, CTADProducesCorrectType) {
+  {
+    auto callback = TypeParam::AsCallback([] {});
+    absl::Cleanup cleanup = std::move(callback);
+
+    static_assert(
+        IsSame<absl::Cleanup<Tag, decltype(callback)>, decltype(cleanup)>(),
+        "");
+  }
+
+  {
+    absl::Cleanup cleanup = &FnPtrFunction;
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+
+  {
+    absl::Cleanup cleanup = FnPtrFunction;
+
+    static_assert(IsSame<absl::Cleanup<Tag, void (*)()>, decltype(cleanup)>(),
+                  "");
+  }
+}
+
+TYPED_TEST(CleanupTest, FactoryAndCTADProduceSameType) {
+  {
+    auto callback = IdentityFactory::AsCallback([] {});
+    auto factory_cleanup = absl::MakeCleanup(callback);
+    absl::Cleanup deduction_cleanup = callback;
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup =
+        absl::MakeCleanup(FunctorClassFactory::AsCallback([] {}));
+    absl::Cleanup deduction_cleanup = FunctorClassFactory::AsCallback([] {});
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup =
+        absl::MakeCleanup(StdFunctionFactory::AsCallback([] {}));
+    absl::Cleanup deduction_cleanup = StdFunctionFactory::AsCallback([] {});
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup = absl::MakeCleanup(&FnPtrFunction);
+    absl::Cleanup deduction_cleanup = &FnPtrFunction;
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+
+  {
+    auto factory_cleanup = absl::MakeCleanup(FnPtrFunction);
+    absl::Cleanup deduction_cleanup = FnPtrFunction;
+
+    static_assert(
+        IsSame<decltype(factory_cleanup), decltype(deduction_cleanup)>(), "");
+  }
+}
+#endif  // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+
+TYPED_TEST(CleanupTest, BasicUsage) {
+  bool called = false;
+
+  {
+    auto cleanup =
+        absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; }));
+    EXPECT_FALSE(called);  // Constructor shouldn't invoke the callback
+  }
+
+  EXPECT_TRUE(called);  // Destructor should invoke the callback
+}
+
+TYPED_TEST(CleanupTest, BasicUsageWithFunctionPointer) {
+  fn_ptr_called = false;
+
+  {
+    auto cleanup =
absl::MakeCleanup(TypeParam::AsCallback(&FnPtrFunction)); + EXPECT_FALSE(fn_ptr_called); // Constructor shouldn't invoke the callback + } + + EXPECT_TRUE(fn_ptr_called); // Destructor should invoke the callback +} + +TYPED_TEST(CleanupTest, Cancel) { + bool called = false; + + { + auto cleanup = + absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); + EXPECT_FALSE(called); // Constructor shouldn't invoke the callback + + std::move(cleanup).Cancel(); + EXPECT_FALSE(called); // Cancel shouldn't invoke the callback + } + + EXPECT_FALSE(called); // Destructor shouldn't invoke the callback +} + +TYPED_TEST(CleanupTest, Invoke) { + bool called = false; + + { + auto cleanup = + absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); + EXPECT_FALSE(called); // Constructor shouldn't invoke the callback + + std::move(cleanup).Invoke(); + EXPECT_TRUE(called); // Invoke should invoke the callback + + called = false; // Reset tracker before destructor runs + } + + EXPECT_FALSE(called); // Destructor shouldn't invoke the callback +} + +TYPED_TEST(CleanupTest, Move) { + bool called = false; + + { + auto moved_from_cleanup = + absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); + EXPECT_FALSE(called); // Constructor shouldn't invoke the callback + + { + auto moved_to_cleanup = std::move(moved_from_cleanup); + EXPECT_FALSE(called); // Move shouldn't invoke the callback + } + + EXPECT_TRUE(called); // Destructor should invoke the callback + + called = false; // Reset tracker before destructor runs + } + + EXPECT_FALSE(called); // Destructor shouldn't invoke the callback +} + +int DestructionCount = 0; + +struct DestructionCounter { + void operator()() {} + + ~DestructionCounter() { ++DestructionCount; } +}; + +TYPED_TEST(CleanupTest, DestructorDestroys) { + { + auto cleanup = + absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); + DestructionCount = 0; + } + + EXPECT_EQ(DestructionCount, 1); // Engaged cleanup destroys +} + +TYPED_TEST(CleanupTest, CancelDestroys) { + { + auto cleanup = + absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); + DestructionCount = 0; + + std::move(cleanup).Cancel(); + EXPECT_EQ(DestructionCount, 1); // Cancel destroys + } + + EXPECT_EQ(DestructionCount, 1); // Canceled cleanup does not double destroy +} + +TYPED_TEST(CleanupTest, InvokeDestroys) { + { + auto cleanup = + absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); + DestructionCount = 0; + + std::move(cleanup).Invoke(); + EXPECT_EQ(DestructionCount, 1); // Invoke destroys + } + + EXPECT_EQ(DestructionCount, 1); // Invoked cleanup does not double destroy +} + +} // namespace diff --git a/third_party/absl/absl/cleanup/internal/cleanup.h b/third_party/absl/absl/cleanup/internal/cleanup.h new file mode 100644 index 000000000..2783fcb7c --- /dev/null +++ b/third_party/absl/absl/cleanup/internal/cleanup.h @@ -0,0 +1,100 @@ +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace cleanup_internal {
+
+struct Tag {};
+
+template <typename Arg, typename... Args>
+constexpr bool WasDeduced() {
+  return (std::is_same<cleanup_internal::Tag, Arg>::value) &&
+         (sizeof...(Args) == 0);
+}
+
+template <typename Callback>
+constexpr bool ReturnsVoid() {
+  return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
+}
+
+template <typename Callback>
+class Storage {
+ public:
+  Storage() = delete;
+
+  explicit Storage(Callback callback) {
+    // Placement-new into a character buffer is used for eager destruction when
+    // the cleanup is invoked or cancelled. To ensure this optimizes well, the
+    // behavior is implemented locally instead of using an absl::optional.
+    ::new (GetCallbackBuffer()) Callback(std::move(callback));
+    is_callback_engaged_ = true;
+  }
+
+  Storage(Storage&& other) {
+    ABSL_HARDENING_ASSERT(other.IsCallbackEngaged());
+
+    ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback()));
+    is_callback_engaged_ = true;
+
+    other.DestroyCallback();
+  }
+
+  Storage(const Storage& other) = delete;
+
+  Storage& operator=(Storage&& other) = delete;
+
+  Storage& operator=(const Storage& other) = delete;
+
+  void* GetCallbackBuffer() { return static_cast<void*>(+callback_buffer_); }
+
+  Callback& GetCallback() {
+    return *reinterpret_cast<Callback*>(GetCallbackBuffer());
+  }
+
+  bool IsCallbackEngaged() const { return is_callback_engaged_; }
+
+  void DestroyCallback() {
+    is_callback_engaged_ = false;
+    GetCallback().~Callback();
+  }
+
+  void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS {
+    std::move(GetCallback())();
+  }
+
+ private:
+  bool is_callback_engaged_;
+  alignas(Callback) char callback_buffer_[sizeof(Callback)];
+};
+
+}  // namespace cleanup_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CLEANUP_INTERNAL_CLEANUP_H_
diff --git a/third_party/absl/absl/container/BUILD.bazel b/third_party/absl/absl/container/BUILD.bazel
new file mode 100644
index 000000000..0ba2fa76d
--- /dev/null
+++ b/third_party/absl/absl/container/BUILD.bazel
@@ -0,0 +1,1085 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +load( + "//absl:copts/configure_copts.bzl", + "ABSL_DEFAULT_COPTS", + "ABSL_DEFAULT_LINKOPTS", + "ABSL_TEST_COPTS", +) + +package( + default_visibility = ["//visibility:public"], + features = [ + "header_modules", + "layering_check", + "parse_headers", + ], +) + +licenses(["notice"]) + +cc_library( + name = "compressed_tuple", + hdrs = ["internal/compressed_tuple.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/utility", + ], +) + +cc_test( + name = "compressed_tuple_test", + srcs = ["internal/compressed_tuple_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":compressed_tuple", + ":test_instance_tracker", + "//absl/memory", + "//absl/types:any", + "//absl/types:optional", + "//absl/utility", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "fixed_array", + hdrs = ["fixed_array.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":compressed_tuple", + "//absl/algorithm", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:dynamic_annotations", + "//absl/base:throw_delegate", + "//absl/memory", + ], +) + +cc_test( + name = "fixed_array_test", + srcs = ["fixed_array_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":fixed_array", + ":test_allocator", + "//absl/base:config", + "//absl/base:exception_testing", + "//absl/hash:hash_testing", + "//absl/memory", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "fixed_array_exception_safety_test", + srcs = ["fixed_array_exception_safety_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":fixed_array", + "//absl/base:config", + "//absl/base:exception_safety_testing", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "fixed_array_benchmark", + testonly = 1, + srcs = ["fixed_array_benchmark.cc"], + copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"], + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + deps = [ + ":fixed_array", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "inlined_vector_internal", + hdrs = ["internal/inlined_vector.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":compressed_tuple", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/types:span", + ], +) + +cc_library( + name = "inlined_vector", + hdrs = ["inlined_vector.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":inlined_vector_internal", + "//absl/algorithm", + "//absl/base:core_headers", + "//absl/base:throw_delegate", + "//absl/memory", + "//absl/meta:type_traits", + ], +) + +cc_library( + name = "test_allocator", + testonly = 1, + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + textual_hdrs = ["internal/test_allocator.h"], + visibility = ["//visibility:private"], +) + +cc_test( + name = "inlined_vector_test", + srcs = ["inlined_vector_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":inlined_vector", + ":test_allocator", + ":test_instance_tracker", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:exception_testing", + "//absl/hash:hash_testing", + "//absl/log:check", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest", + 
"@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "inlined_vector_benchmark", + testonly = 1, + srcs = ["inlined_vector_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + deps = [ + ":inlined_vector", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/strings", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_test( + name = "inlined_vector_exception_safety_test", + srcs = ["inlined_vector_exception_safety_test.cc"], + copts = ABSL_TEST_COPTS, + deps = [ + ":inlined_vector", + "//absl/base:config", + "//absl/base:exception_safety_testing", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "test_instance_tracker", + testonly = 1, + srcs = ["internal/test_instance_tracker.cc"], + hdrs = ["internal/test_instance_tracker.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = ["//absl/types:compare"], +) + +cc_test( + name = "test_instance_tracker_test", + srcs = ["internal/test_instance_tracker_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":test_instance_tracker", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +NOTEST_TAGS_MOBILE = [ + "no_test_android_arm", + "no_test_android_arm64", + "no_test_android_x86", + "no_test_ios_x86_64", +] + +cc_library( + name = "flat_hash_map", + hdrs = ["flat_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_map", + "//absl/algorithm:container", + "//absl/base:core_headers", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_map_test", + srcs = ["flat_hash_map_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + deps = [ + ":flat_hash_map", + ":hash_generator_testing", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_members_test", + ":unordered_map_modifiers_test", + "//absl/log:check", + "//absl/meta:type_traits", + "//absl/types:any", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "flat_hash_set", + hdrs = ["flat_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":raw_hash_set", + "//absl/algorithm:container", + "//absl/base:core_headers", + "//absl/memory", + ], +) + +cc_test( + name = "flat_hash_set_test", + srcs = ["flat_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + deps = [ + ":container_memory", + ":flat_hash_set", + ":hash_generator_testing", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_members_test", + ":unordered_set_modifiers_test", + "//absl/base:config", + "//absl/log:check", + "//absl/memory", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_map", + hdrs = ["node_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":hash_function_defaults", + ":node_slot_policy", + ":raw_hash_map", + "//absl/algorithm:container", + "//absl/base:core_headers", + "//absl/memory", + ], +) + 
+cc_test( + name = "node_hash_map_test", + srcs = ["node_hash_map_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + deps = [ + ":hash_generator_testing", + ":node_hash_map", + ":tracked", + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + ":unordered_map_members_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_hash_set", + hdrs = ["node_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_function_defaults", + ":node_slot_policy", + ":raw_hash_set", + "//absl/algorithm:container", + "//absl/base:core_headers", + "//absl/memory", + ], +) + +cc_test( + name = "node_hash_set_test", + srcs = ["node_hash_set_test.cc"], + copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + deps = [ + ":node_hash_set", + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_members_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "container_memory", + hdrs = ["internal/container_memory.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/utility", + ], +) + +cc_test( + name = "container_memory_test", + srcs = ["internal/container_memory_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + deps = [ + ":container_memory", + ":test_instance_tracker", + "//absl/base:no_destructor", + "//absl/meta:type_traits", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_function_defaults", + hdrs = ["internal/hash_function_defaults.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//visibility:private", + ], + deps = [ + "//absl/base:config", + "//absl/hash", + "//absl/strings", + "//absl/strings:cord", + ], +) + +cc_test( + name = "hash_function_defaults_test", + srcs = ["internal/hash_function_defaults_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], + deps = [ + ":hash_function_defaults", + "//absl/hash", + "//absl/random", + "//absl/strings", + "//absl/strings:cord", + "//absl/strings:cord_test_helpers", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hash_generator_testing", + testonly = 1, + srcs = ["internal/hash_generator_testing.cc"], + hdrs = ["internal/hash_generator_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_testing", + "//absl/base:no_destructor", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + ], +) + +cc_library( + name = "hash_policy_testing", + testonly = 1, + hdrs = ["internal/hash_policy_testing.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/hash", + "//absl/strings", + ], +) + +cc_test( + name = "hash_policy_testing_test", + srcs = ["internal/hash_policy_testing_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_testing", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", 
+ ], +) + +cc_library( + name = "hash_policy_traits", + hdrs = ["internal/hash_policy_traits.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":common_policy_traits", + "//absl/meta:type_traits", + ], +) + +cc_test( + name = "hash_policy_traits_test", + srcs = ["internal/hash_policy_traits_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_traits", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "common_policy_traits", + hdrs = ["internal/common_policy_traits.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = ["//absl/meta:type_traits"], +) + +cc_test( + name = "common_policy_traits_test", + srcs = ["internal/common_policy_traits_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":common_policy_traits", + "//absl/base:config", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "hashtable_debug", + hdrs = ["internal/hashtable_debug.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hashtable_debug_hooks", + ], +) + +cc_library( + name = "hashtable_debug_hooks", + hdrs = ["internal/hashtable_debug_hooks.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], +) + +cc_library( + name = "hashtablez_sampler", + srcs = [ + "internal/hashtablez_sampler.cc", + "internal/hashtablez_sampler_force_weak_definition.cc", + ], + hdrs = ["internal/hashtablez_sampler.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/debugging:stacktrace", + "//absl/memory", + "//absl/profiling:exponential_biased", + "//absl/profiling:sample_recorder", + "//absl/synchronization", + "//absl/time", + "//absl/utility", + ], +) + +cc_test( + name = "hashtablez_sampler_test", + srcs = ["internal/hashtablez_sampler_test.cc"], + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = [ + "no_test_wasm", + ], + deps = [ + ":hashtablez_sampler", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/profiling:sample_recorder", + "//absl/synchronization", + "//absl/synchronization:thread_pool", + "//absl/time", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "node_slot_policy", + hdrs = ["internal/node_slot_policy.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = ["//absl/base:config"], +) + +cc_test( + name = "node_slot_policy_test", + srcs = ["internal/node_slot_policy_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_policy_traits", + ":node_slot_policy", + "//absl/base:config", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "raw_hash_map", + hdrs = ["internal/raw_hash_map.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":container_memory", + ":raw_hash_set", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:throw_delegate", + ], +) + +cc_library( + name = "common", + hdrs = ["internal/common.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/meta:type_traits", + "//absl/types:optional", + ], +) + 
+cc_library( + name = "raw_hash_set", + srcs = ["internal/raw_hash_set.cc"], + hdrs = ["internal/raw_hash_set.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":common", + ":compressed_tuple", + ":container_memory", + ":hash_policy_traits", + ":hashtable_debug_hooks", + ":hashtablez_sampler", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:dynamic_annotations", + "//absl/base:endian", + "//absl/base:prefetch", + "//absl/base:raw_logging_internal", + "//absl/hash", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/numeric:bits", + "//absl/utility", + ], +) + +cc_test( + name = "raw_hash_set_test", + srcs = ["internal/raw_hash_set_test.cc"], + copts = ABSL_TEST_COPTS, + linkstatic = 1, + tags = NOTEST_TAGS_MOBILE + [ + "no_test_loonix", + # TODO(b/237097643): investigate race and remove + "noarm_gemu", + ], + deps = [ + ":container_memory", + ":flat_hash_map", + ":flat_hash_set", + ":hash_function_defaults", + ":hash_policy_testing", + ":hashtable_debug", + ":hashtablez_sampler", + ":raw_hash_set", + ":test_allocator", + "//absl/base", + "//absl/base:config", + "//absl/base:core_headers", + "//absl/base:prefetch", + "//absl/hash", + "//absl/log", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "raw_hash_set_benchmark", + testonly = 1, + srcs = ["internal/raw_hash_set_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":hash_function_defaults", + ":raw_hash_set", + "//absl/base:raw_logging_internal", + "//absl/strings:str_format", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_binary( + name = "raw_hash_set_probe_benchmark", + testonly = 1, + srcs = ["internal/raw_hash_set_probe_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = select({ + "//conditions:default": [], + }) + ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":flat_hash_map", + ":hash_function_defaults", + ":hashtable_debug", + ":raw_hash_set", + "//absl/base:no_destructor", + "//absl/random", + "//absl/random:distributions", + "//absl/strings", + "//absl/strings:str_format", + "//absl/types:optional", + ], +) + +cc_test( + name = "raw_hash_set_allocator_test", + size = "small", + srcs = ["internal/raw_hash_set_allocator_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":raw_hash_set", + ":tracked", + "//absl/base:config", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "layout", + hdrs = ["internal/layout.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + "//absl/base:core_headers", + "//absl/debugging:demangle_internal", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:span", + "//absl/utility", + ], +) + +cc_test( + name = "layout_test", + size = "small", + srcs = ["internal/layout_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], + visibility = ["//visibility:private"], + deps = [ + ":layout", + "//absl/base:config", + "//absl/log:check", + "//absl/types:span", + "//absl/utility", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "layout_benchmark", + testonly 
= 1, + srcs = ["internal/layout_benchmark.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":layout", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "@com_github_google_benchmark//:benchmark_main", + ], +) + +cc_library( + name = "tracked", + testonly = 1, + hdrs = ["internal/tracked.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], +) + +cc_library( + name = "unordered_map_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_map_constructor_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_map_lookup_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_map_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_constructor_test", + testonly = 1, + hdrs = ["internal/unordered_set_constructor_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "//absl/meta:type_traits", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_members_test", + testonly = 1, + hdrs = ["internal/unordered_set_members_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/meta:type_traits", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_map_members_test", + testonly = 1, + hdrs = ["internal/unordered_map_members_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/meta:type_traits", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_lookup_test", + testonly = 1, + hdrs = ["internal/unordered_set_lookup_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_library( + name = "unordered_set_modifiers_test", + testonly = 1, + hdrs = ["internal/unordered_set_modifiers_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hash_generator_testing", + ":hash_policy_testing", + "@com_google_googletest//:gtest", + ], +) + +cc_test( + name = "unordered_set_test", + srcs = ["internal/unordered_set_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + deps = [ + ":unordered_set_constructor_test", + ":unordered_set_lookup_test", + ":unordered_set_members_test", + ":unordered_set_modifiers_test", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "unordered_map_test", + srcs = ["internal/unordered_map_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + deps = [ + ":unordered_map_constructor_test", + ":unordered_map_lookup_test", + 
":unordered_map_members_test", + ":unordered_map_modifiers_test", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_test( + name = "sample_element_size_test", + srcs = ["sample_element_size_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["no_test_loonix"], + visibility = ["//visibility:private"], + deps = [ + ":flat_hash_map", + ":flat_hash_set", + ":node_hash_map", + ":node_hash_set", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_library( + name = "btree", + srcs = [ + "internal/btree.h", + "internal/btree_container.h", + ], + hdrs = [ + "btree_map.h", + "btree_set.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:public"], + deps = [ + ":common", + ":common_policy_traits", + ":compressed_tuple", + ":container_memory", + ":layout", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/base:throw_delegate", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/strings:cord", + "//absl/types:compare", + "//absl/utility", + ], +) + +cc_library( + name = "btree_test_common", + testonly = 1, + hdrs = ["btree_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":flat_hash_set", + "//absl/strings", + "//absl/strings:cord", + "//absl/time", + ], +) + +cc_test( + name = "btree_test", + size = "large", + srcs = [ + "btree_test.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + shard_count = 10, + tags = [ + "no_test:os:ios", + "no_test_ios", + "no_test_wasm", + ], + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":btree_test_common", + ":test_allocator", + ":test_instance_tracker", + "//absl/algorithm:container", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/flags:flag", + "//absl/hash:hash_testing", + "//absl/memory", + "//absl/random", + "//absl/strings", + "//absl/types:compare", + "//absl/types:optional", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "btree_benchmark", + testonly = 1, + srcs = [ + "btree_benchmark.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":btree_test_common", + ":flat_hash_map", + ":flat_hash_set", + ":hashtable_debug", + "//absl/algorithm:container", + "//absl/base:raw_logging_internal", + "//absl/hash", + "//absl/log", + "//absl/memory", + "//absl/random", + "//absl/strings:cord", + "//absl/strings:str_format", + "//absl/time", + "@com_github_google_benchmark//:benchmark_main", + "@com_google_googletest//:gtest", + ], +) diff --git a/third_party/absl/absl/container/CMakeLists.txt b/third_party/absl/absl/container/CMakeLists.txt new file mode 100644 index 000000000..128cc0e9d --- /dev/null +++ b/third_party/absl/absl/container/CMakeLists.txt @@ -0,0 +1,989 @@ +# +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +absl_cc_library( + NAME + btree + HDRS + "btree_map.h" + "btree_set.h" + "internal/btree.h" + "internal/btree_container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::container_common + absl::common_policy_traits + absl::compare + absl::compressed_tuple + absl::container_memory + absl::cord + absl::core_headers + absl::layout + absl::memory + absl::raw_logging_internal + absl::strings + absl::throw_delegate + absl::type_traits + absl::utility +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + btree_test_common + hdrs + "btree_test.h" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::btree + absl::cord + absl::flat_hash_set + absl::strings + absl::time + TESTONLY +) + +absl_cc_test( + NAME + btree_test + SRCS + "btree_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::algorithm_container + absl::btree + absl::btree_test_common + absl::compare + absl::core_headers + absl::flags + absl::hash_testing + absl::optional + absl::random_random + absl::raw_logging_internal + absl::strings + absl::test_allocator + absl::test_instance_tracker + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + compressed_tuple + HDRS + "internal/compressed_tuple.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + compressed_tuple_test + SRCS + "internal/compressed_tuple_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::any + absl::compressed_tuple + absl::memory + absl::optional + absl::test_instance_tracker + absl::utility + GTest::gmock_main +) + +absl_cc_library( + NAME + fixed_array + HDRS + "fixed_array.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::algorithm + absl::config + absl::core_headers + absl::dynamic_annotations + absl::throw_delegate + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + fixed_array_test + SRCS + "fixed_array_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::fixed_array + absl::config + absl::exception_testing + absl::hash_testing + absl::memory + absl::test_allocator + GTest::gmock_main +) + +absl_cc_test( + NAME + fixed_array_exception_safety_test + SRCS + "fixed_array_exception_safety_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::fixed_array + absl::config + absl::exception_safety_testing + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + inlined_vector_internal + HDRS + "internal/inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compressed_tuple + absl::config + absl::core_headers + absl::memory + absl::span + absl::type_traits + PUBLIC +) + +absl_cc_library( + NAME + inlined_vector + HDRS + "inlined_vector.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::algorithm + absl::core_headers + absl::inlined_vector_internal + absl::throw_delegate + absl::memory + absl::type_traits + PUBLIC +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + test_allocator + HDRS + "internal/test_allocator.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + GTest::gmock +) + +absl_cc_test( + NAME + inlined_vector_test + SRCS + "inlined_vector_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::check + absl::config + absl::core_headers + absl::exception_testing + absl::hash_testing + absl::inlined_vector + absl::memory + absl::strings + absl::test_allocator + absl::test_instance_tracker + GTest::gmock_main +) + +absl_cc_test( + NAME + inlined_vector_exception_safety_test + SRCS + "inlined_vector_exception_safety_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::inlined_vector + absl::config + absl::exception_safety_testing + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + test_instance_tracker + HDRS + "internal/test_instance_tracker.h" + SRCS + "internal/test_instance_tracker.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::compare + TESTONLY +) + +absl_cc_test( + NAME + test_instance_tracker_test + SRCS + "internal/test_instance_tracker_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::test_instance_tracker + GTest::gmock_main +) + +absl_cc_library( + NAME + flat_hash_map + HDRS + "flat_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::core_headers + absl::hash_function_defaults + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + flat_hash_map_test + SRCS + "flat_hash_map_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::any + absl::check + absl::flat_hash_map + absl::hash_generator_testing + absl::type_traits + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_members_test + absl::unordered_map_modifiers_test + GTest::gmock_main +) + +absl_cc_library( + NAME + flat_hash_set + HDRS + "flat_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::hash_function_defaults + absl::raw_hash_set + absl::algorithm_container + absl::core_headers + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + flat_hash_set_test + SRCS + "flat_hash_set_test.cc" + COPTS + ${ABSL_TEST_COPTS} + "-DUNORDERED_SET_CXX17" + DEPS + absl::check + absl::config + absl::container_memory + absl::flat_hash_set + absl::hash_generator_testing + absl::memory + absl::strings + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_members_test + absl::unordered_set_modifiers_test + GTest::gmock_main +) + +absl_cc_library( + NAME + node_hash_map + HDRS + "node_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::container_memory + absl::core_headers + absl::hash_function_defaults + absl::node_slot_policy + absl::raw_hash_map + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + node_hash_map_test + SRCS + "node_hash_map_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::node_hash_map + absl::tracked + absl::unordered_map_constructor_test + absl::unordered_map_lookup_test + absl::unordered_map_members_test + absl::unordered_map_modifiers_test + GTest::gmock_main +) + +absl_cc_library( + NAME + node_hash_set + HDRS + "node_hash_set.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::core_headers + absl::hash_function_defaults + absl::node_slot_policy + absl::raw_hash_set + absl::algorithm_container + absl::memory + PUBLIC +) + +absl_cc_test( + NAME + node_hash_set_test + SRCS + "node_hash_set_test.cc" + COPTS + ${ABSL_TEST_COPTS} + 
"-DUNORDERED_SET_CXX17" + DEPS + absl::hash_generator_testing + absl::node_hash_set + absl::unordered_set_constructor_test + absl::unordered_set_lookup_test + absl::unordered_set_members_test + absl::unordered_set_modifiers_test + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + container_memory + HDRS + "internal/container_memory.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::memory + absl::type_traits + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + container_memory_test + SRCS + "internal/container_memory_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::container_memory + absl::no_destructor + absl::strings + absl::test_instance_tracker + absl::type_traits + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_function_defaults + HDRS + "internal/hash_function_defaults.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::cord + absl::hash + absl::strings + PUBLIC +) + +absl_cc_test( + NAME + hash_function_defaults_test + SRCS + "internal/hash_function_defaults_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::cord + absl::cord_test_helpers + absl::hash_function_defaults + absl::hash + absl::random_random + absl::strings + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_generator_testing + HDRS + "internal/hash_generator_testing.h" + SRCS + "internal/hash_generator_testing.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_testing + absl::memory + absl::meta + absl::no_destructor + absl::strings + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_policy_testing + HDRS + "internal/hash_policy_testing.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash + absl::strings + TESTONLY +) + +absl_cc_test( + NAME + hash_policy_testing_test + SRCS + "internal/hash_policy_testing_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_testing + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hash_policy_traits + HDRS + "internal/hash_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::common_policy_traits + absl::meta + PUBLIC +) + +absl_cc_test( + NAME + hash_policy_traits_test + SRCS + "internal/hash_policy_traits_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_policy_traits + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + common_policy_traits + HDRS + "internal/common_policy_traits.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::meta + PUBLIC +) + +absl_cc_test( + NAME + common_policy_traits_test + SRCS + "internal/common_policy_traits_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::common_policy_traits + absl::config + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + hashtablez_sampler + HDRS + "internal/hashtablez_sampler.h" + SRCS + "internal/hashtablez_sampler.cc" + "internal/hashtablez_sampler_force_weak_definition.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::base + absl::config + absl::exponential_biased + absl::raw_logging_internal + absl::sample_recorder + absl::synchronization + absl::time +) + +absl_cc_test( + NAME + hashtablez_sampler_test + SRCS + "internal/hashtablez_sampler_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::hashtablez_sampler + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtable_debug + HDRS + "internal/hashtable_debug.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::hashtable_debug_hooks +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + hashtable_debug_hooks + HDRS + "internal/hashtable_debug_hooks.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + node_slot_policy + HDRS + "internal/node_slot_policy.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) + +absl_cc_test( + NAME + node_slot_policy_test + SRCS + "internal/node_slot_policy_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::hash_policy_traits + absl::node_slot_policy + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_hash_map + HDRS + "internal/raw_hash_map.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::container_memory + absl::core_headers + absl::raw_hash_set + absl::throw_delegate + PUBLIC +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + container_common + HDRS + "internal/common.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::type_traits +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + raw_hash_set + HDRS + "internal/raw_hash_set.h" + SRCS + "internal/raw_hash_set.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::bits + absl::compressed_tuple + absl::config + absl::container_common + absl::container_memory + absl::core_headers + absl::dynamic_annotations + absl::endian + absl::hash + absl::hash_policy_traits + absl::hashtable_debug_hooks + absl::hashtablez_sampler + absl::memory + absl::meta + absl::optional + absl::prefetch + absl::raw_logging_internal + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + raw_hash_set_test + SRCS + "internal/raw_hash_set_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::base + absl::config + absl::container_memory + absl::core_headers + absl::flat_hash_map + absl::flat_hash_set + absl::hash + absl::hash_function_defaults + absl::hash_policy_testing + absl::hashtable_debug + absl::hashtablez_sampler + absl::log + absl::memory + absl::prefetch + absl::raw_hash_set + absl::strings + absl::test_allocator + absl::type_traits + GTest::gmock_main +) + +absl_cc_test( + NAME + raw_hash_set_allocator_test + SRCS + "internal/raw_hash_set_allocator_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::raw_hash_set + absl::tracked + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library( + NAME + layout + HDRS + "internal/layout.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::debugging_internal + absl::meta + absl::strings + absl::span + absl::utility + PUBLIC +) + +absl_cc_test( + NAME + layout_test + SRCS + "internal/layout_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::layout + absl::check + absl::config + absl::span + absl::utility + GTest::gmock_main +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + tracked + HDRS + "internal/tracked.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + unordered_map_constructor_test + HDRS + "internal/unordered_map_constructor_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + GTest::gmock + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + unordered_map_lookup_test + HDRS + "internal/unordered_map_lookup_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + GTest::gmock + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + unordered_map_members_test + HDRS + "internal/unordered_map_members_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::type_traits + GTest::gmock + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + unordered_map_modifiers_test + HDRS + "internal/unordered_map_modifiers_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + GTest::gmock + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + unordered_set_constructor_test + HDRS + "internal/unordered_set_constructor_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + GTest::gmock + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + unordered_set_lookup_test + HDRS + "internal/unordered_set_lookup_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::hash_generator_testing + absl::hash_policy_testing + GTest::gmock + TESTONLY +) + +# Internal-only target, do not depend on directly. +absl_cc_library( + NAME + unordered_set_members_test + HDRS + "internal/unordered_set_members_test.h" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::type_traits + GTest::gmock + TESTONLY +) + +# Internal-only target, do not depend on directly. 
+absl_cc_library(
+  NAME
+    unordered_set_modifiers_test
+  HDRS
+    "internal/unordered_set_modifiers_test.h"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::hash_generator_testing
+    absl::hash_policy_testing
+    GTest::gmock
+  TESTONLY
+)
+
+absl_cc_test(
+  NAME
+    unordered_set_test
+  SRCS
+    "internal/unordered_set_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::unordered_set_constructor_test
+    absl::unordered_set_lookup_test
+    absl::unordered_set_members_test
+    absl::unordered_set_modifiers_test
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    unordered_map_test
+  SRCS
+    "internal/unordered_map_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::unordered_map_constructor_test
+    absl::unordered_map_lookup_test
+    absl::unordered_map_members_test
+    absl::unordered_map_modifiers_test
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
+    sample_element_size_test
+  SRCS
+    "sample_element_size_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::flat_hash_map
+    absl::flat_hash_set
+    absl::node_hash_map
+    absl::node_hash_set
+    GTest::gmock_main
+)
diff --git a/third_party/absl/absl/container/btree_benchmark.cc b/third_party/absl/absl/container/btree_benchmark.cc
new file mode 100644
index 000000000..0d26fd424
--- /dev/null
+++ b/third_party/absl/absl/container/btree_benchmark.cc
@@ -0,0 +1,764 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <functional>
+#include <map>
+#include <numeric>
+#include <random>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "absl/algorithm/container.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/btree_test.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/hash/hash.h"
+#include "absl/log/log.h"
+#include "absl/memory/memory.h"
+#include "absl/random/random.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/str_format.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+constexpr size_t kBenchmarkValues = 1 << 20;
+
+// How many times we add and remove sub-batches in one batch of *AddRem
+// benchmarks.
+constexpr size_t kAddRemBatchSize = 1 << 2;
+
+// Generates n values in the range [0, 4 * n].
+template <typename V>
+std::vector<V> GenerateValues(int n) {
+  constexpr int kSeed = 23;
+  return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
+}
+
+// Benchmark insertion of values into a container.
+template <typename T>
+void BM_InsertImpl(benchmark::State& state, bool sorted) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+  if (sorted) {
+    std::sort(values.begin(), values.end());
+  }
+  T container(values.begin(), values.end());
+
+  // Remove and re-insert 10% of the keys per batch.
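+  // (Editor's note: with kBenchmarkValues == 1 << 20, the integer division
+  // below gives batch_size == 104858, so each KeepRunningBatch() pass erases
+  // and re-inserts roughly a tenth of the keyspace; the erase loop runs
+  // between PauseTiming() and ResumeTiming(), so only the re-inserts are
+  // measured.)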
+ const int batch_size = (kBenchmarkValues + 9) / 10; + while (state.KeepRunningBatch(batch_size)) { + state.PauseTiming(); + const auto i = static_cast(state.iterations()); + + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.erase(key_of_value(values[x])); + } + + state.ResumeTiming(); + + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.insert(values[x]); + } + } +} + +template +void BM_Insert(benchmark::State& state) { + BM_InsertImpl(state, false); +} + +template +void BM_InsertSorted(benchmark::State& state) { + BM_InsertImpl(state, true); +} + +// Benchmark inserting the first few elements in a container. In b-tree, this is +// when the root node grows. +template +void BM_InsertSmall(benchmark::State& state) { + using V = typename remove_pair_const::type; + + const int kSize = 8; + std::vector values = GenerateValues(kSize); + T container; + + while (state.KeepRunningBatch(kSize)) { + for (int i = 0; i < kSize; ++i) { + benchmark::DoNotOptimize(container.insert(values[i])); + } + state.PauseTiming(); + // Do not measure the time it takes to clear the container. + container.clear(); + state.ResumeTiming(); + } +} + +template +void BM_LookupImpl(benchmark::State& state, bool sorted) { + using V = typename remove_pair_const::type; + typename KeyOfValue::type key_of_value; + + std::vector values = GenerateValues(kBenchmarkValues); + if (sorted) { + std::sort(values.begin(), values.end()); + } + T container(values.begin(), values.end()); + + while (state.KeepRunning()) { + int idx = state.iterations() % kBenchmarkValues; + benchmark::DoNotOptimize(container.find(key_of_value(values[idx]))); + } +} + +// Benchmark lookup of values in a container. +template +void BM_Lookup(benchmark::State& state) { + BM_LookupImpl(state, false); +} + +// Benchmark lookup of values in a full container, meaning that values +// are inserted in-order to take advantage of biased insertion, which +// yields a full tree. +template +void BM_FullLookup(benchmark::State& state) { + BM_LookupImpl(state, true); +} + +// Benchmark erasing values from a container. +template +void BM_Erase(benchmark::State& state) { + using V = typename remove_pair_const::type; + typename KeyOfValue::type key_of_value; + std::vector values = GenerateValues(kBenchmarkValues); + T container(values.begin(), values.end()); + + // Remove and re-insert 10% of the keys per batch. + const int batch_size = (kBenchmarkValues + 9) / 10; + while (state.KeepRunningBatch(batch_size)) { + const int i = state.iterations(); + + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.erase(key_of_value(values[x])); + } + + state.PauseTiming(); + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.insert(values[x]); + } + state.ResumeTiming(); + } +} + +// Benchmark erasing multiple values from a container. +template +void BM_EraseRange(benchmark::State& state) { + using V = typename remove_pair_const::type; + typename KeyOfValue::type key_of_value; + std::vector values = GenerateValues(kBenchmarkValues); + T container(values.begin(), values.end()); + + // Remove and re-insert 10% of the keys per batch. 
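+  // (Editor's note: unlike BM_Erase above, which erases keys one at a time,
+  // this benchmark times container.erase(start, itr) over contiguous
+  // iterator ranges; collecting the removed values and re-inserting them
+  // for the next round happens while timing is paused.)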
+ const int batch_size = (kBenchmarkValues + 9) / 10; + while (state.KeepRunningBatch(batch_size)) { + const int i = state.iterations(); + + const int start_index = i % kBenchmarkValues; + + state.PauseTiming(); + { + std::vector removed; + removed.reserve(batch_size); + auto itr = container.find(key_of_value(values[start_index])); + auto start = itr; + for (int j = 0; j < batch_size; j++) { + if (itr == container.end()) { + state.ResumeTiming(); + container.erase(start, itr); + state.PauseTiming(); + itr = container.begin(); + start = itr; + } + removed.push_back(*itr++); + } + + state.ResumeTiming(); + container.erase(start, itr); + state.PauseTiming(); + + container.insert(removed.begin(), removed.end()); + } + state.ResumeTiming(); + } +} + +// Predicate that erases every other element. We can't use a lambda because +// C++11 doesn't support generic lambdas. +// TODO(b/207389011): consider adding benchmarks that remove different fractions +// of keys (e.g. 10%, 90%). +struct EraseIfPred { + uint64_t i = 0; + template + bool operator()(const T&) { + return ++i % 2; + } +}; + +// Benchmark erasing multiple values from a container with a predicate. +template +void BM_EraseIf(benchmark::State& state) { + using V = typename remove_pair_const::type; + std::vector values = GenerateValues(kBenchmarkValues); + + // Removes half of the keys per batch. + const int batch_size = (kBenchmarkValues + 1) / 2; + EraseIfPred pred; + while (state.KeepRunningBatch(batch_size)) { + state.PauseTiming(); + { + T container(values.begin(), values.end()); + state.ResumeTiming(); + erase_if(container, pred); + benchmark::DoNotOptimize(container); + state.PauseTiming(); + } + state.ResumeTiming(); + } +} + +// Benchmark steady-state insert (into first half of range) and remove (from +// second half of range), treating the container approximately like a queue with +// log-time access for all elements. This benchmark does not test the case where +// insertion and removal happen in the same region of the tree. This benchmark +// counts two value constructors. +template +void BM_QueueAddRem(benchmark::State& state) { + using V = typename remove_pair_const::type; + typename KeyOfValue::type key_of_value; + + ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance"); + + T container; + + const size_t half = kBenchmarkValues / 2; + std::vector remove_keys(half); + std::vector add_keys(half); + + // We want to do the exact same work repeatedly, and the benchmark can end + // after a different number of iterations depending on the speed of the + // individual run so we use a large batch size here and ensure that we do + // deterministic work every batch. + while (state.KeepRunningBatch(half * kAddRemBatchSize)) { + state.PauseTiming(); + + container.clear(); + + for (size_t i = 0; i < half; ++i) { + remove_keys[i] = i; + add_keys[i] = i; + } + constexpr int kSeed = 5; + std::mt19937_64 rand(kSeed); + std::shuffle(remove_keys.begin(), remove_keys.end(), rand); + std::shuffle(add_keys.begin(), add_keys.end(), rand); + + // Note needs lazy generation of values. 
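+    // (Editor's note: Generator, from btree_test.h, builds the value for a
+    // given integer index on demand, so the benchmark never materializes all
+    // kBenchmarkValues * kAddRemBatchSize values at once; that matters for
+    // heavyweight value types such as Cord and std::string.)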
+    Generator<V> g(kBenchmarkValues * kAddRemBatchSize);
+
+    for (size_t i = 0; i < half; ++i) {
+      container.insert(g(add_keys[i]));
+      container.insert(g(half + remove_keys[i]));
+    }
+
+    // There are three parts each of size "half":
+    // 1 is being deleted from [offset - half, offset)
+    // 2 is standing [offset, offset + half)
+    // 3 is being inserted into [offset + half, offset + 2 * half)
+    size_t offset = 0;
+
+    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
+      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+      std::shuffle(add_keys.begin(), add_keys.end(), rand);
+      offset += half;
+
+      state.ResumeTiming();
+      for (size_t idx = 0; idx < half; ++idx) {
+        container.erase(key_of_value(g(offset - half + remove_keys[idx])));
+        container.insert(g(offset + half + add_keys[idx]));
+      }
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
+// Mixed insertion and deletion in the same range using pre-constructed values.
+template <typename T>
+void BM_MixedAddRem(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");
+
+  T container;
+
+  // Create two random shuffles
+  std::vector<size_t> remove_keys(kBenchmarkValues);
+  std::vector<size_t> add_keys(kBenchmarkValues);
+
+  // We want to do the exact same work repeatedly, and the benchmark can end
+  // after a different number of iterations depending on the speed of the
+  // individual run so we use a large batch size here and ensure that we do
+  // deterministic work every batch.
+  while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
+    state.PauseTiming();
+
+    container.clear();
+
+    constexpr int kSeed = 7;
+    std::mt19937_64 rand(kSeed);
+
+    std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2);
+
+    // Insert the first half of the values (already in random order).
+    container.insert(values.begin(), values.begin() + kBenchmarkValues);
+
+    // Set up the key vectors for the first round.
+    for (size_t i = 0; i < kBenchmarkValues; ++i) {
+      // remove_keys and add_keys will be swapped before each round,
+      // therefore fill add_keys here w/ the keys being inserted, so
+      // they'll be the first to be removed.
+      remove_keys[i] = i + kBenchmarkValues;
+      add_keys[i] = i;
+    }
+
+    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
+      remove_keys.swap(add_keys);
+      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+      std::shuffle(add_keys.begin(), add_keys.end(), rand);
+
+      state.ResumeTiming();
+      for (size_t idx = 0; idx < kBenchmarkValues; ++idx) {
+        container.erase(key_of_value(values[remove_keys[idx]]));
+        container.insert(values[add_keys[idx]]);
+      }
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
+// Insertion at end, removal from the beginning. This benchmark
+// counts two value constructors.
+// TODO(ezb): we could add a GenerateNext version of generator that could
+// reduce noise for string-like types.
+template <typename T>
+void BM_Fifo(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+
+  T container;
+  // Need lazy generation of values as state.max_iterations is large.
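+  // (Editor's note: one fresh value is produced per timed iteration, so the
+  // generator must cover kBenchmarkValues + state.max_iterations distinct
+  // indices; producing them lazily keeps setup cost independent of how long
+  // the benchmark framework decides to run.)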
+ Generator g(kBenchmarkValues + state.max_iterations); + + for (int i = 0; i < kBenchmarkValues; i++) { + container.insert(g(i)); + } + + while (state.KeepRunning()) { + container.erase(container.begin()); + container.insert(container.end(), g(state.iterations() + kBenchmarkValues)); + } +} + +// Iteration (forward) through the tree +template +void BM_FwdIter(benchmark::State& state) { + using V = typename remove_pair_const::type; + using R = typename T::value_type const*; + + std::vector values = GenerateValues(kBenchmarkValues); + T container(values.begin(), values.end()); + + auto iter = container.end(); + + R r = nullptr; + + while (state.KeepRunning()) { + if (iter == container.end()) iter = container.begin(); + r = &(*iter); + ++iter; + } + + benchmark::DoNotOptimize(r); +} + +// Benchmark random range-construction of a container. +template +void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) { + using V = typename remove_pair_const::type; + + std::vector values = GenerateValues(kBenchmarkValues); + if (sorted) { + std::sort(values.begin(), values.end()); + } + { + T container(values.begin(), values.end()); + } + + while (state.KeepRunning()) { + T container(values.begin(), values.end()); + benchmark::DoNotOptimize(container); + } +} + +template +void BM_InsertRangeRandom(benchmark::State& state) { + BM_RangeConstructionImpl(state, false); +} + +template +void BM_InsertRangeSorted(benchmark::State& state) { + BM_RangeConstructionImpl(state, true); +} + +#define STL_ORDERED_TYPES(value) \ + using stl_set_##value = std::set; \ + using stl_map_##value = std::map; \ + using stl_multiset_##value = std::multiset; \ + using stl_multimap_##value = std::multimap + +using StdString = std::string; +STL_ORDERED_TYPES(int32_t); +STL_ORDERED_TYPES(int64_t); +STL_ORDERED_TYPES(StdString); +STL_ORDERED_TYPES(Cord); +STL_ORDERED_TYPES(Time); + +#define STL_UNORDERED_TYPES(value) \ + using stl_unordered_set_##value = std::unordered_set; \ + using stl_unordered_map_##value = std::unordered_map; \ + using flat_hash_set_##value = flat_hash_set; \ + using flat_hash_map_##value = flat_hash_map; \ + using stl_unordered_multiset_##value = std::unordered_multiset; \ + using stl_unordered_multimap_##value = \ + std::unordered_multimap + +#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash) \ + using stl_unordered_set_##value = std::unordered_set; \ + using stl_unordered_map_##value = std::unordered_map; \ + using flat_hash_set_##value = flat_hash_set; \ + using flat_hash_map_##value = flat_hash_map; \ + using stl_unordered_multiset_##value = std::unordered_multiset; \ + using stl_unordered_multimap_##value = \ + std::unordered_multimap + +STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash); + +STL_UNORDERED_TYPES(int32_t); +STL_UNORDERED_TYPES(int64_t); +STL_UNORDERED_TYPES(StdString); +STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash); + +#define BTREE_TYPES(value) \ + using btree_256_set_##value = \ + btree_set, std::allocator>; \ + using btree_256_map_##value = \ + btree_map, \ + std::allocator>>; \ + using btree_256_multiset_##value = \ + btree_multiset, std::allocator>; \ + using btree_256_multimap_##value = \ + btree_multimap, \ + std::allocator>> + +BTREE_TYPES(int32_t); +BTREE_TYPES(int64_t); +BTREE_TYPES(StdString); +BTREE_TYPES(Cord); +BTREE_TYPES(Time); + +#define MY_BENCHMARK4(type, func) \ + void BM_##type##_##func(benchmark::State& state) { BM_##func(state); } \ + BENCHMARK(BM_##type##_##func) + +#define MY_BENCHMARK3_STL(type) \ + MY_BENCHMARK4(type, Insert); \ + 
MY_BENCHMARK4(type, InsertSorted); \ + MY_BENCHMARK4(type, InsertSmall); \ + MY_BENCHMARK4(type, Lookup); \ + MY_BENCHMARK4(type, FullLookup); \ + MY_BENCHMARK4(type, Erase); \ + MY_BENCHMARK4(type, EraseRange); \ + MY_BENCHMARK4(type, QueueAddRem); \ + MY_BENCHMARK4(type, MixedAddRem); \ + MY_BENCHMARK4(type, Fifo); \ + MY_BENCHMARK4(type, FwdIter); \ + MY_BENCHMARK4(type, InsertRangeRandom); \ + MY_BENCHMARK4(type, InsertRangeSorted) + +#define MY_BENCHMARK3(type) \ + MY_BENCHMARK4(type, EraseIf); \ + MY_BENCHMARK3_STL(type) + +#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \ + MY_BENCHMARK3_STL(stl_##type); \ + MY_BENCHMARK3_STL(stl_unordered_##type); \ + MY_BENCHMARK3(btree_256_##type) + +#define MY_BENCHMARK2(type) \ + MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \ + MY_BENCHMARK3(flat_hash_##type) + +// Define MULTI_TESTING to see benchmarks for multi-containers also. +// +// You can use --copt=-DMULTI_TESTING. +#ifdef MULTI_TESTING +#define MY_BENCHMARK(type) \ + MY_BENCHMARK2(set_##type); \ + MY_BENCHMARK2(map_##type); \ + MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \ + MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type) +#else +#define MY_BENCHMARK(type) \ + MY_BENCHMARK2(set_##type); \ + MY_BENCHMARK2(map_##type) +#endif + +MY_BENCHMARK(int32_t); +MY_BENCHMARK(int64_t); +MY_BENCHMARK(StdString); +MY_BENCHMARK(Cord); +MY_BENCHMARK(Time); + +// Define a type whose size and cost of moving are independently customizable. +// When sizeof(value_type) increases, we expect btree to no longer have as much +// cache-locality advantage over STL. When cost of moving increases, we expect +// btree to actually do more work than STL because it has to move values around +// and STL doesn't have to. +template +struct BigType { + BigType() : BigType(0) {} + explicit BigType(int x) { std::iota(values.begin(), values.end(), x); } + + void Copy(const BigType& other) { + for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i]; + // If Copies > Size, do extra copies. + for (int i = Size, idx = 0; i < Copies; ++i) { + int64_t tmp = other.values[idx]; + benchmark::DoNotOptimize(tmp); + idx = idx + 1 == Size ? 0 : idx + 1; + } + } + + BigType(const BigType& other) { Copy(other); } + BigType& operator=(const BigType& other) { + Copy(other); + return *this; + } + + // Compare only the first Copies elements if Copies is less than Size. + bool operator<(const BigType& other) const { + return std::lexicographical_compare( + values.begin(), values.begin() + std::min(Size, Copies), + other.values.begin(), other.values.begin() + std::min(Size, Copies)); + } + bool operator==(const BigType& other) const { + return std::equal(values.begin(), values.begin() + std::min(Size, Copies), + other.values.begin()); + } + + // Support absl::Hash. 
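+  // (Editor's note: AbslHashValue is the extension point that absl::Hash
+  // finds via ADL; combining only the first min(Size, Copies) elements keeps
+  // the hash consistent with operator==, which compares the same prefix.)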
+ template + friend State AbslHashValue(State h, const BigType& b) { + for (int i = 0; i < Size && i < Copies; ++i) + h = State::combine(std::move(h), b.values[i]); + return h; + } + + std::array values; +}; + +#define BIG_TYPE_BENCHMARKS(SIZE, COPIES) \ + using stl_set_size##SIZE##copies##COPIES = std::set>; \ + using stl_map_size##SIZE##copies##COPIES = \ + std::map, intptr_t>; \ + using stl_multiset_size##SIZE##copies##COPIES = \ + std::multiset>; \ + using stl_multimap_size##SIZE##copies##COPIES = \ + std::multimap, intptr_t>; \ + using stl_unordered_set_size##SIZE##copies##COPIES = \ + std::unordered_set, \ + absl::Hash>>; \ + using stl_unordered_map_size##SIZE##copies##COPIES = \ + std::unordered_map, intptr_t, \ + absl::Hash>>; \ + using flat_hash_set_size##SIZE##copies##COPIES = \ + flat_hash_set>; \ + using flat_hash_map_size##SIZE##copies##COPIES = \ + flat_hash_map, intptr_t>; \ + using stl_unordered_multiset_size##SIZE##copies##COPIES = \ + std::unordered_multiset, \ + absl::Hash>>; \ + using stl_unordered_multimap_size##SIZE##copies##COPIES = \ + std::unordered_multimap, intptr_t, \ + absl::Hash>>; \ + using btree_256_set_size##SIZE##copies##COPIES = \ + btree_set>; \ + using btree_256_map_size##SIZE##copies##COPIES = \ + btree_map, intptr_t>; \ + using btree_256_multiset_size##SIZE##copies##COPIES = \ + btree_multiset>; \ + using btree_256_multimap_size##SIZE##copies##COPIES = \ + btree_multimap, intptr_t>; \ + MY_BENCHMARK(size##SIZE##copies##COPIES) + +// Define BIG_TYPE_TESTING to see benchmarks for more big types. +// +// You can use --copt=-DBIG_TYPE_TESTING. +#ifndef NODESIZE_TESTING +#ifdef BIG_TYPE_TESTING +BIG_TYPE_BENCHMARKS(1, 4); +BIG_TYPE_BENCHMARKS(4, 1); +BIG_TYPE_BENCHMARKS(4, 4); +BIG_TYPE_BENCHMARKS(1, 8); +BIG_TYPE_BENCHMARKS(8, 1); +BIG_TYPE_BENCHMARKS(8, 8); +BIG_TYPE_BENCHMARKS(1, 16); +BIG_TYPE_BENCHMARKS(16, 1); +BIG_TYPE_BENCHMARKS(16, 16); +BIG_TYPE_BENCHMARKS(1, 32); +BIG_TYPE_BENCHMARKS(32, 1); +BIG_TYPE_BENCHMARKS(32, 32); +#else +BIG_TYPE_BENCHMARKS(32, 32); +#endif +#endif + +// Benchmark using unique_ptrs to large value types. In order to be able to use +// the same benchmark code as the other types, use a type that holds a +// unique_ptr and has a copy constructor. 
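+// (Editor's note: the deep-copying copy operations below make copy cost
+// proportional to the pointed-to BigType, while the defaulted move
+// operations stay pointer-cheap, so these benchmarks isolate container
+// behavior when moves are much cheaper than copies.)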
+template +struct BigTypePtr { + BigTypePtr() : BigTypePtr(0) {} + explicit BigTypePtr(int x) { + ptr = absl::make_unique>(x); + } + BigTypePtr(const BigTypePtr& other) { + ptr = absl::make_unique>(*other.ptr); + } + BigTypePtr(BigTypePtr&& other) noexcept = default; + BigTypePtr& operator=(const BigTypePtr& other) { + ptr = absl::make_unique>(*other.ptr); + } + BigTypePtr& operator=(BigTypePtr&& other) noexcept = default; + + bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; } + bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; } + + std::unique_ptr> ptr; +}; + +template +double ContainerInfo(const btree_set>& b) { + const double bytes_used = + b.bytes_used() + b.size() * sizeof(BigType); + const double bytes_per_value = bytes_used / b.size(); + BtreeContainerInfoLog(b, bytes_used, bytes_per_value); + return bytes_per_value; +} +template +double ContainerInfo(const btree_map>& b) { + const double bytes_used = + b.bytes_used() + b.size() * sizeof(BigType); + const double bytes_per_value = bytes_used / b.size(); + BtreeContainerInfoLog(b, bytes_used, bytes_per_value); + return bytes_per_value; +} + +#define BIG_TYPE_PTR_BENCHMARKS(SIZE) \ + using stl_set_size##SIZE##copies##SIZE##ptr = std::set>; \ + using stl_map_size##SIZE##copies##SIZE##ptr = \ + std::map>; \ + using stl_unordered_set_size##SIZE##copies##SIZE##ptr = \ + std::unordered_set, \ + absl::Hash>>; \ + using stl_unordered_map_size##SIZE##copies##SIZE##ptr = \ + std::unordered_map>; \ + using flat_hash_set_size##SIZE##copies##SIZE##ptr = \ + flat_hash_set>; \ + using flat_hash_map_size##SIZE##copies##SIZE##ptr = \ + flat_hash_map>; \ + using btree_256_set_size##SIZE##copies##SIZE##ptr = \ + btree_set>; \ + using btree_256_map_size##SIZE##copies##SIZE##ptr = \ + btree_map>; \ + MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr) + +BIG_TYPE_PTR_BENCHMARKS(32); + +void BM_BtreeSet_IteratorSubtraction(benchmark::State& state) { + absl::InsecureBitGen bitgen; + std::vector vec; + // Randomize the set's insertion order so the nodes aren't all full. + vec.reserve(state.range(0)); + for (int i = 0; i < state.range(0); ++i) vec.push_back(i); + absl::c_shuffle(vec, bitgen); + + absl::btree_set set; + for (int i : vec) set.insert(i); + + size_t distance = absl::Uniform(bitgen, 0u, set.size()); + while (state.KeepRunningBatch(distance)) { + size_t end = absl::Uniform(bitgen, distance, set.size()); + size_t begin = end - distance; + benchmark::DoNotOptimize(set.find(static_cast(end)) - + set.find(static_cast(begin))); + distance = absl::Uniform(bitgen, 0u, set.size()); + } +} + +BENCHMARK(BM_BtreeSet_IteratorSubtraction)->Range(1 << 10, 1 << 20); + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/btree_map.h b/third_party/absl/absl/container/btree_map.h new file mode 100644 index 000000000..0f62f0bd8 --- /dev/null +++ b/third_party/absl/absl/container/btree_map.h @@ -0,0 +1,887 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: btree_map.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree maps: sorted associative containers mapping +// keys to values. +// +// * `absl::btree_map<>` +// * `absl::btree_multimap<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::map` and `std::multimap`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. +// +// Unlike `std::map` and `std::multimap`, which are commonly implemented using +// red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree maps perform better than their `std::map` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::map` and `std::multimap` as there are some API differences, which are +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid +// iterator at the current position. Another important difference is that +// key-types must be copy-constructible. +// +// Another API difference is that btree iterators can be subtracted, and this +// is faster than using std::distance. + +#ifndef ABSL_CONTAINER_BTREE_MAP_H_ +#define ABSL_CONTAINER_BTREE_MAP_H_ + +#include "absl/base/attributes.h" +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace container_internal { + +template +struct map_params; + +} // namespace container_internal + +// absl::btree_map<> +// +// An `absl::btree_map` is an ordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::map` (in most cases). +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less`. +// +// An `absl::btree_map` uses a default allocator of +// `std::allocator>` to allocate (and deallocate) +// nodes, and construct and destruct values within those nodes. 
You may +// instead specify a custom allocator `A` (which in turn requires specifying a +// custom comparator `C`) as in `absl::btree_map`. +// +template , + typename Alloc = std::allocator>> +class btree_map + : public container_internal::btree_map_container< + container_internal::btree>> { + using Base = typename btree_map::btree_map_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_map` supports the same overload set as `std::map` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_map map1; + // + // * Initializer List constructor + // + // absl::btree_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_map map3(map2); + // + // * Copy assignment operator + // + // absl::btree_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_map map7(v.begin(), v.end()); + btree_map() {} + using Base::Base; + + // btree_map::begin() + // + // Returns an iterator to the beginning of the `btree_map`. + using Base::begin; + + // btree_map::cbegin() + // + // Returns a const iterator to the beginning of the `btree_map`. + using Base::cbegin; + + // btree_map::end() + // + // Returns an iterator to the end of the `btree_map`. + using Base::end; + + // btree_map::cend() + // + // Returns a const iterator to the end of the `btree_map`. + using Base::cend; + + // btree_map::empty() + // + // Returns whether or not the `btree_map` is empty. + using Base::empty; + + // btree_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_map`. + using Base::max_size; + + // btree_map::size() + // + // Returns the number of elements currently within the `btree_map`. + using Base::size; + + // btree_map::clear() + // + // Removes all elements from the `btree_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_map::erase() + // + // Erases elements within the `btree_map`. If an erase occurs, any references, + // pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_map`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). 
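+  //
+  // Example (editor's sketch, not part of the upstream comment):
+  //
+  //   absl::btree_map<int, std::string> m = {{1, "one"}, {2, "two"}};
+  //   auto next = m.erase(m.begin());  // next points at {2, "two"}
+  //   size_t n = m.erase(2);           // n == 1; the map is now empty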
+ using Base::erase; + + // btree_map::insert() + // + // Inserts an element of the specified value into the `btree_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // std::pair insert(const value_type& value): + // + // Inserts a value into the `btree_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(value_type&& value): + // + // Inserts a moveable value into the `btree_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_map::insert_or_assign() + // + // Inserts an element of the specified value into the `btree_map` provided + // that a value with the given key does not already exist, or replaces the + // corresponding mapped type with the forwarded `obj` argument if a key for + // that value already exists, returning an iterator pointing to the newly + // inserted element. Overloads are listed below. + // + // pair insert_or_assign(const key_type& k, M&& obj): + // pair insert_or_assign(key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map`. If the returned bool is true, insertion took place, and if + // it's false, assignment took place. + // + // iterator insert_or_assign(const_iterator hint, + // const key_type& k, M&& obj): + // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // btree_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. 
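+  //
+  // Example (editor's sketch): emplace() may construct a value even when the
+  // key is already present, so prefer try_emplace() for expensive values:
+  //
+  //   absl::btree_map<int, std::string> m;
+  //   m.emplace(1, "one");      // inserts {1, "one"}
+  //   m.try_emplace(1, "uno");  // key 1 exists; no value is constructed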
+ using Base::emplace; + + // btree_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + // + // Overloads are listed below. + // + // std::pair try_emplace(const key_type& k, Args&&... args): + // std::pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::try_emplace; + + // btree_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Any references, pointers, or iterators + // are invalidated. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_map` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_map::extract_and_get_next() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle along with an iterator to the next + // element. 
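+  // (Editor's sketch of erase-while-iterating via node extraction, where
+  // ShouldRemove is a hypothetical predicate:
+  //
+  //   for (auto it = m.begin(); it != m.end();) {
+  //     if (ShouldRemove(*it)) {
+  //       auto res = m.extract_and_get_next(it);
+  //       it = res.next;  // res.node owns the extracted element
+  //     } else {
+  //       ++it;
+  //     }
+  //   }
+  // )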
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ // Extracts the element at the indicated position, returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
+ // btree_map::merge()
+ //
+ // Extracts elements from a given `source` btree_map into this
+ // `btree_map`. If the destination `btree_map` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // btree_map::swap(btree_map& other)
+ //
+ // Exchanges the contents of this `btree_map` with those of the `other`
+ // btree_map, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `btree_map` remain valid, except
+ // for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_map::at()
+ //
+ // Returns a reference to the mapped value of the element with key equivalent
+ // to the passed key.
+ using Base::at;
+
+ // btree_map::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_map`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::contains;
+
+ // btree_map::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_map`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::count;
+
+ // btree_map::equal_range()
+ //
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the `btree_map`.
+ using Base::equal_range;
+
+ // btree_map::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_map::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_map::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_map::operator[]()
+ //
+ // Returns a reference to the value mapped to the passed key within the
+ // `btree_map`, performing an `insert()` if the key does not already
+ // exist.
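+ //
+ // Illustrative sketch (editor's example, not part of the upstream header):
+ //
+ //   absl::btree_map<std::string, int> counts;
+ //   ++counts["apple"];  // inserts {"apple", 0}, then increments it to 1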
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated. Otherwise iterators are not affected and references are not
+ // invalidated. Overloads are listed below.
+ //
+ // T& operator[](key_type&& key):
+ // T& operator[](const key_type& key):
+ //
+ // Inserts a value_type object constructed in-place if the element with the
+ // given key does not exist.
+ using Base::operator[];
+
+ // btree_map::get_allocator()
+ //
+ // Returns the allocator function associated with this `btree_map`.
+ using Base::get_allocator;
+
+ // btree_map::key_comp();
+ //
+ // Returns the key comparator associated with this `btree_map`.
+ using Base::key_comp;
+
+ // btree_map::value_comp();
+ //
+ // Returns the value comparator associated with this `btree_map`.
+ using Base::value_comp;
+};
+
+// absl::swap(absl::btree_map<>, absl::btree_map<>)
+//
+// Swaps the contents of two `absl::btree_map` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
+template <typename K, typename V, typename C, typename A, typename Pred>
+typename btree_map<K, V, C, A>::size_type erase_if(
+ btree_map<K, V, C, A> &map, Pred pred) {
+ return container_internal::btree_access::erase_if(map, std::move(pred));
+}
+
+// absl::btree_multimap
+//
+// An `absl::btree_multimap<K, V>` is an ordered associative container of
+// keys and associated values designed to be a more efficient replacement for
+// `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap
+// allows multiple elements with equivalent keys.
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_multimap<K, V>` uses a default allocator of
+// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate)
+// nodes, and construct and destruct values within those nodes. You may
+// instead specify a custom allocator `A` (which in turn requires specifying a
+// custom comparator `C`) as in `absl::btree_multimap<K, V, C, A>`.
+//
+template <typename Key, typename Value, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<std::pair<const Key, Value>>>
+class btree_multimap
+ : public container_internal::btree_multimap_container<
+ container_internal::btree<container_internal::map_params<
+ Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*IsMulti=*/true>>> {
+ using Base = typename btree_multimap::btree_multimap_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_multimap` supports the same overload set as `std::multimap`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // absl::btree_multimap<int, std::string> map1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::btree_multimap<int, std::string> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::btree_multimap<int, std::string> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // absl::btree_multimap<int, std::string> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::btree_multimap<int, std::string> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::btree_multimap<int, std::string> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+ // absl::btree_multimap<int, std::string> map7(v.begin(), v.end());
+ btree_multimap() {}
+ using Base::Base;
+
+ // btree_multimap::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_multimap`.
+ using Base::begin;
+
+ // btree_multimap::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_multimap`.
+ using Base::cbegin;
+
+ // btree_multimap::end()
+ //
+ // Returns an iterator to the end of the `btree_multimap`.
+ using Base::end;
+
+ // btree_multimap::cend()
+ //
+ // Returns a const iterator to the end of the `btree_multimap`.
+ using Base::cend;
+
+ // btree_multimap::empty()
+ //
+ // Returns whether or not the `btree_multimap` is empty.
+ using Base::empty;
+
+ // btree_multimap::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `btree_multimap` under current memory constraints. This value can be
+ // thought of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_multimap`.
+ using Base::max_size;
+
+ // btree_multimap::size()
+ //
+ // Returns the number of elements currently within the `btree_multimap`.
+ using Base::size;
+
+ // btree_multimap::clear()
+ //
+ // Removes all elements from the `btree_multimap`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_multimap::erase()
+ //
+ // Erases elements within the `btree_multimap`. If an erase occurs, any
+ // references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_multimap`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+ // Erases the elements in the half-open interval [`first`, `last`), returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the elements matching the key, if any exist, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_multimap::insert()
+ //
+ // Inserts an element of the specified value into the `btree_multimap`,
+ // returning an iterator pointing to the newly inserted element.
+ // Any references, pointers, or iterators are invalidated. Overloads are
+ // listed below.
+ //
+ // iterator insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_multimap`, returning an iterator to the
+ // inserted element.
+ //
+ // iterator insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_multimap`, returning an iterator
+ // to the inserted element.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_multimap::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multimap`. Any references, pointers, or iterators are
+ // invalidated.
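+ //
+ // Illustrative sketch (editor's example, not part of the upstream header):
+ //
+ //   absl::btree_multimap<int, std::string> m;
+ //   m.emplace(1, "a");
+ //   m.emplace(1, "b");  // duplicate keys are kept; m.size() == 2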
+ using Base::emplace;
+
+ // btree_multimap::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multimap`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search.
+ //
+ // Any references, pointers, or iterators are invalidated.
+ using Base::emplace_hint;
+
+ // btree_multimap::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& k):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_multimap`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: when compiled in an earlier version of C++ than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_multimap::extract_and_get_next()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle along with an iterator to the next
+ // element.
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ // Extracts the element at the indicated position, returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
+ // btree_multimap::merge()
+ //
+ // Extracts all elements from a given `source` btree_multimap into this
+ // `btree_multimap`.
+ using Base::merge;
+
+ // btree_multimap::swap(btree_multimap& other)
+ //
+ // Exchanges the contents of this `btree_multimap` with those of the `other`
+ // btree_multimap, avoiding invocation of any move, copy, or swap operations
+ // on individual elements.
+ //
+ // All iterators and references on the `btree_multimap` remain valid,
+ // except for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_multimap::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_multimap`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::contains;
+
+ // btree_multimap::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
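+ //
+ // Illustrative sketch (editor's example, not part of the upstream header;
+ // heterogeneous lookup requires a transparent comparator such as
+ // `std::less<>`):
+ //
+ //   absl::btree_multimap<std::string, int, std::less<>> m;
+ //   m.insert({"k", 1});
+ //   m.insert({"k", 2});
+ //   auto n = m.count(absl::string_view("k"));  // n == 2; no temporary string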
+ using Base::count;
+
+ // btree_multimap::equal_range()
+ //
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_multimap`.
+ using Base::equal_range;
+
+ // btree_multimap::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_multimap::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multimap::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_multimap::get_allocator()
+ //
+ // Returns the allocator function associated with this `btree_multimap`.
+ using Base::get_allocator;
+
+ // btree_multimap::key_comp();
+ //
+ // Returns the key comparator associated with this `btree_multimap`.
+ using Base::key_comp;
+
+ // btree_multimap::value_comp();
+ //
+ // Returns the value comparator associated with this `btree_multimap`.
+ using Base::value_comp;
+};
+
+// absl::swap(absl::btree_multimap<>, absl::btree_multimap<>)
+//
+// Swaps the contents of two `absl::btree_multimap` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multimap<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
+template <typename K, typename V, typename C, typename A, typename Pred>
+typename btree_multimap<K, V, C, A>::size_type erase_if(
+ btree_multimap<K, V, C, A> &map, Pred pred) {
+ return container_internal::btree_access::erase_if(map, std::move(pred));
+}
+
+namespace container_internal {
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool IsMulti>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+ /*IsMap=*/true, map_slot_policy<Key, Data>> {
+ using super_type = typename map_params::common_params;
+ using mapped_type = Data;
+ // This type allows us to move keys when it is safe to do so. It is safe
+ // for maps in which value_type and mutable_value_type are layout compatible.
+ using slot_policy = typename super_type::slot_policy;
+ using slot_type = typename super_type::slot_type;
+ using value_type = typename super_type::value_type;
+ using init_type = typename super_type::init_type;
+
+ template <typename V>
+ static auto key(const V &value ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ -> decltype((value.first)) {
+ return value.first;
+ }
+ static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+ static const Key &key(slot_type *s) { return slot_policy::key(s); }
+ // For use in node handle.
+ static auto mutable_key(slot_type *s) + -> decltype(slot_policy::mutable_key(s)) { + return slot_policy::mutable_key(s); + } + static mapped_type &value(value_type *value) { return value->second; } +}; + +} // namespace container_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_MAP_H_ diff --git a/third_party/absl/absl/container/btree_set.h b/third_party/absl/absl/container/btree_set.h new file mode 100644 index 000000000..51dc42b79 --- /dev/null +++ b/third_party/absl/absl/container/btree_set.h @@ -0,0 +1,821 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: btree_set.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree sets: sorted associative containers of +// values. +// +// * `absl::btree_set<>` +// * `absl::btree_multiset<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::set` and `std::multiset`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. +// +// Unlike `std::set` and `std::multiset`, which are commonly implemented using +// red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree sets perform better than their `std::set` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::set` and `std::multiset` as there are some API differences, which are +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid +// iterator at the current position. +// +// Another API difference is that btree iterators can be subtracted, and this +// is faster than using std::distance. 
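+//
+// Illustrative example (editor's note, not part of the upstream comment):
+//
+//   absl::btree_set<int> s = {1, 2, 3, 4};
+//   auto n = s.end() - s.begin();  // n == 4; cheaper than std::distance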
+
+#ifndef ABSL_CONTAINER_BTREE_SET_H_
+#define ABSL_CONTAINER_BTREE_SET_H_
+
+#include "absl/container/internal/btree.h" // IWYU pragma: export
+#include "absl/container/internal/btree_container.h" // IWYU pragma: export
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace container_internal {
+
+template <typename Key>
+struct set_slot_policy;
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool IsMulti>
+struct set_params;
+
+} // namespace container_internal
+
+// absl::btree_set<>
+//
+// An `absl::btree_set<K>` is an ordered associative container of unique key
+// values designed to be a more efficient replacement for `std::set` (in most
+// cases).
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_set<K>` uses a default allocator of `std::allocator<K>` to
+// allocate (and deallocate) nodes, and construct and destruct values within
+// those nodes. You may instead specify a custom allocator `A` (which in turn
+// requires specifying a custom comparator `C`) as in
+// `absl::btree_set<K, C, A>`.
+//
+template <typename Key, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<Key>>
+class btree_set
+ : public container_internal::btree_set_container<
+ container_internal::btree<container_internal::set_params<
+ Key, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*IsMulti=*/false>>> {
+ using Base = typename btree_set::btree_set_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_set` supports the same overload set as `std::set`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // absl::btree_set<std::string> set1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::btree_set<std::string> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::btree_set<std::string> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // absl::btree_set<std::string> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::btree_set<std::string> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::btree_set<std::string> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::string> v = {"a", "b"};
+ // absl::btree_set<std::string> set7(v.begin(), v.end());
+ btree_set() {}
+ using Base::Base;
+
+ // btree_set::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_set`.
+ using Base::begin;
+
+ // btree_set::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_set`.
+ using Base::cbegin;
+
+ // btree_set::end()
+ //
+ // Returns an iterator to the end of the `btree_set`.
+ using Base::end;
+
+ // btree_set::cend()
+ //
+ // Returns a const iterator to the end of the `btree_set`.
+ using Base::cend;
+
+ // btree_set::empty()
+ //
+ // Returns whether or not the `btree_set` is empty.
+ using Base::empty;
+
+ // btree_set::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `btree_set` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_set`.
+ using Base::max_size;
+
+ // btree_set::size()
+ //
+ // Returns the number of elements currently within the `btree_set`.
+ using Base::size;
+
+ // btree_set::clear()
+ //
+ // Removes all elements from the `btree_set`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_set::erase()
+ //
+ // Erases elements within the `btree_set`. Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_set`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+ // Erases the elements in the half-open interval [`first`, `last`), returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
+ using Base::erase;
+
+ // btree_set::insert()
+ //
+ // Inserts an element of the specified value into the `btree_set`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If an insertion
+ // occurs, any references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // std::pair<iterator, bool> insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_set`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator, bool> insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_set`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_set::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_set`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_set::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_set`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
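+ //
+ // Illustrative sketch (editor's example, not part of the upstream header):
+ //
+ //   absl::btree_set<int> s = {1, 2, 3};
+ //   auto it = s.emplace_hint(s.end(), 4);  // new largest key, so end() is a
+ //                                          // good non-binding hint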
+ using Base::emplace_hint;
+
+ // btree_set::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Any references, pointers, or iterators
+ // are invalidated. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& k):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_set`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_set::extract_and_get_next()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle along with an iterator to the next
+ // element.
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ // Extracts the element at the indicated position, returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
+ // btree_set::merge()
+ //
+ // Extracts elements from a given `source` btree_set into this
+ // `btree_set`. If the destination `btree_set` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // btree_set::swap(btree_set& other)
+ //
+ // Exchanges the contents of this `btree_set` with those of the `other`
+ // btree_set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `btree_set` remain valid, except
+ // for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_set::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_set`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::contains;
+
+ // btree_set::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::count;
+
+ // btree_set::equal_range()
+ //
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_set`.
+ using Base::equal_range;
+
+ // btree_set::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_set`.
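+ //
+ // Illustrative sketch (editor's example, not part of the upstream header;
+ // heterogeneous lookup requires a transparent comparator):
+ //
+ //   absl::btree_set<std::string, std::less<>> s = {"apple"};
+ //   auto it = s.find(absl::string_view("apple"));  // no std::string temporary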
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_set::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_set::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_set::get_allocator()
+ //
+ // Returns the allocator function associated with this `btree_set`.
+ using Base::get_allocator;
+
+ // btree_set::key_comp();
+ //
+ // Returns the key comparator associated with this `btree_set`.
+ using Base::key_comp;
+
+ // btree_set::value_comp();
+ //
+ // Returns the value comparator associated with this `btree_set`. The keys to
+ // sort the elements are the values themselves, therefore `value_comp` and its
+ // sibling member function `key_comp` are equivalent.
+ using Base::value_comp;
+};
+
+// absl::swap(absl::btree_set<>, absl::btree_set<>)
+//
+// Swaps the contents of two `absl::btree_set` containers.
+template <typename K, typename C, typename A>
+void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
+template <typename K, typename C, typename A, typename Pred>
+typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
+ Pred pred) {
+ return container_internal::btree_access::erase_if(set, std::move(pred));
+}
+
+// absl::btree_multiset<>
+//
+// An `absl::btree_multiset<K>` is an ordered associative container of
+// keys and associated values designed to be a more efficient replacement
+// for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree
+// multiset allows equivalent elements.
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_multiset<K>` uses a default allocator of `std::allocator<K>`
+// to allocate (and deallocate) nodes, and construct and destruct values within
+// those nodes. You may instead specify a custom allocator `A` (which in turn
+// requires specifying a custom comparator `C`) as in
+// `absl::btree_multiset<K, C, A>`.
+//
+template <typename Key, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<Key>>
+class btree_multiset
+ : public container_internal::btree_multiset_container<
+ container_internal::btree<container_internal::set_params<
+ Key, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*IsMulti=*/true>>> {
+ using Base = typename btree_multiset::btree_multiset_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_multiset` supports the same overload set as `std::multiset`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // absl::btree_multiset<std::string> set1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::btree_multiset<std::string> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::btree_multiset<std::string> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // absl::btree_multiset<std::string> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::btree_multiset<std::string> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::btree_multiset<std::string> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::string> v = {"a", "b"};
+ // absl::btree_multiset<std::string> set7(v.begin(), v.end());
+ btree_multiset() {}
+ using Base::Base;
+
+ // btree_multiset::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_multiset`.
+ using Base::begin;
+
+ // btree_multiset::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_multiset`.
+ using Base::cbegin;
+
+ // btree_multiset::end()
+ //
+ // Returns an iterator to the end of the `btree_multiset`.
+ using Base::end;
+
+ // btree_multiset::cend()
+ //
+ // Returns a const iterator to the end of the `btree_multiset`.
+ using Base::cend;
+
+ // btree_multiset::empty()
+ //
+ // Returns whether or not the `btree_multiset` is empty.
+ using Base::empty;
+
+ // btree_multiset::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `btree_multiset` under current memory constraints. This value can be
+ // thought of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_multiset`.
+ using Base::max_size;
+
+ // btree_multiset::size()
+ //
+ // Returns the number of elements currently within the `btree_multiset`.
+ using Base::size;
+
+ // btree_multiset::clear()
+ //
+ // Removes all elements from the `btree_multiset`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_multiset::erase()
+ //
+ // Erases elements within the `btree_multiset`. Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_multiset`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+ // Erases the elements in the half-open interval [`first`, `last`), returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the elements matching the key, if any exist, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_multiset::insert()
+ //
+ // Inserts an element of the specified value into the `btree_multiset`,
+ // returning an iterator pointing to the newly inserted element.
+ // Any references, pointers, or iterators are invalidated. Overloads are
+ // listed below.
+ //
+ // iterator insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_multiset`, returning an iterator to the
+ // inserted element.
+ //
+ // iterator insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_multiset`, returning an iterator
+ // to the inserted element.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_multiset::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multiset`. Any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_multiset::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multiset`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search.
+ //
+ // Any references, pointers, or iterators are invalidated.
+ using Base::emplace_hint;
+
+ // btree_multiset::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& k):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_multiset`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_multiset::extract_and_get_next()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle along with an iterator to the next
+ // element.
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ // Extracts the element at the indicated position, returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
+ // btree_multiset::merge()
+ //
+ // Extracts all elements from a given `source` btree_multiset into this
+ // `btree_multiset`.
+ using Base::merge;
+
+ // btree_multiset::swap(btree_multiset& other)
+ //
+ // Exchanges the contents of this `btree_multiset` with those of the `other`
+ // btree_multiset, avoiding invocation of any move, copy, or swap operations
+ // on individual elements.
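+ //
+ // Illustrative sketch (editor's example, not part of the upstream header):
+ //
+ //   absl::btree_multiset<int> a = {1, 2}, b = {3};
+ //   a.swap(b);  // constant time; no per-element moves, copies, or swaps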
+ //
+ // All iterators and references on the `btree_multiset` remain valid,
+ // except for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_multiset::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_multiset`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::contains;
+
+ // btree_multiset::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::count;
+
+ // btree_multiset::equal_range()
+ //
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_multiset`.
+ using Base::equal_range;
+
+ // btree_multiset::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::find;
+
+ // btree_multiset::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multiset::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
+ // btree_multiset::get_allocator()
+ //
+ // Returns the allocator function associated with this `btree_multiset`.
+ using Base::get_allocator;
+
+ // btree_multiset::key_comp();
+ //
+ // Returns the key comparator associated with this `btree_multiset`.
+ using Base::key_comp;
+
+ // btree_multiset::value_comp();
+ //
+ // Returns the value comparator associated with this `btree_multiset`. The
+ // keys to sort the elements are the values themselves, therefore `value_comp`
+ // and its sibling member function `key_comp` are equivalent.
+ using Base::value_comp;
+};
+
+// absl::swap(absl::btree_multiset<>, absl::btree_multiset<>)
+//
+// Swaps the contents of two `absl::btree_multiset` containers.
+template <typename K, typename C, typename A>
+void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multiset<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
+template <typename K, typename C, typename A, typename Pred>
+typename btree_multiset<K, C, A>::size_type erase_if(
+ btree_multiset<K, C, A> &set, Pred pred) {
+ return container_internal::btree_access::erase_if(set, std::move(pred));
+}
+
+namespace container_internal {
+
+// This type implements the necessary functions from the
+// absl::container_internal::slot_type interface for btree_(multi)set.
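+// (Editor's note, for orientation: for sets the slot is the key itself, so
+// element access below is a plain dereference, and construct/destroy simply
+// forward to allocator_traits.)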
+template <typename Key>
+struct set_slot_policy {
+ using slot_type = Key;
+ using value_type = Key;
+ using mutable_value_type = Key;
+
+ static value_type &element(slot_type *slot) { return *slot; }
+ static const value_type &element(const slot_type *slot) { return *slot; }
+
+ template <typename Alloc, class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&...args) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, *other);
+ }
+
+ template <typename Alloc>
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool IsMulti>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+ /*IsMap=*/false, set_slot_policy<Key>> {
+ using value_type = Key;
+ using slot_type = typename set_params::common_params::slot_type;
+
+ template <typename V>
+ static const V &key(const V &value) {
+ return value;
+ }
+ static const Key &key(const slot_type *slot) { return *slot; }
+ static const Key &key(slot_type *slot) { return *slot; }
+};
+
+} // namespace container_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_BTREE_SET_H_
diff --git a/third_party/absl/absl/container/btree_test.cc b/third_party/absl/absl/container/btree_test.cc
new file mode 100644
index 000000000..d7102fe4d
--- /dev/null
+++ b/third_party/absl/absl/container/btree_test.cc
@@ -0,0 +1,3461 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "absl/container/btree_test.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/algorithm/container.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/internal/test_allocator.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/flags/flag.h" +#include "absl/hash/hash_testing.h" +#include "absl/memory/memory.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" +#include "absl/types/compare.h" +#include "absl/types/optional.h" + +ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests"); + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::test_internal::CopyableMovableInstance; +using ::absl::test_internal::InstanceTracker; +using ::absl::test_internal::MovableOnlyInstance; +using ::testing::ElementsAre; +using ::testing::ElementsAreArray; +using ::testing::IsEmpty; +using ::testing::IsNull; +using ::testing::Pair; +using ::testing::SizeIs; + +template +void CheckPairEquals(const T &x, const U &y) { + ABSL_INTERNAL_CHECK(x == y, "Values are unequal."); +} + +template +void CheckPairEquals(const std::pair &x, const std::pair &y) { + CheckPairEquals(x.first, y.first); + CheckPairEquals(x.second, y.second); +} +} // namespace + +// The base class for a sorted associative container checker. TreeType is the +// container type to check and CheckerType is the container type to check +// against. TreeType is expected to be btree_{set,map,multiset,multimap} and +// CheckerType is expected to be {set,map,multiset,multimap}. 
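+// (Editor's note: e.g. base_checker<absl::btree_set<int>, std::set<int>>
+// mirrors every mutation on both containers and checks that they remain
+// equal after each operation.)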
+template +class base_checker { + public: + using key_type = typename TreeType::key_type; + using value_type = typename TreeType::value_type; + using key_compare = typename TreeType::key_compare; + using pointer = typename TreeType::pointer; + using const_pointer = typename TreeType::const_pointer; + using reference = typename TreeType::reference; + using const_reference = typename TreeType::const_reference; + using size_type = typename TreeType::size_type; + using difference_type = typename TreeType::difference_type; + using iterator = typename TreeType::iterator; + using const_iterator = typename TreeType::const_iterator; + using reverse_iterator = typename TreeType::reverse_iterator; + using const_reverse_iterator = typename TreeType::const_reverse_iterator; + + public: + base_checker() : const_tree_(tree_) {} + base_checker(const base_checker &other) + : tree_(other.tree_), const_tree_(tree_), checker_(other.checker_) {} + template + base_checker(InputIterator b, InputIterator e) + : tree_(b, e), const_tree_(tree_), checker_(b, e) {} + + iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + + template + IterType iter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.end()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.end(), + "Checker iterator not at end."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + template + IterType riter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.rend()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.rend(), + "Checker iterator not at rend."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + void value_check(const value_type &v) { + typename KeyOfValue::type key_of_value; + const key_type &key = key_of_value(v); + CheckPairEquals(*find(key), v); + lower_bound(key); + upper_bound(key); + equal_range(key); + contains(key); + count(key); + } + void erase_check(const key_type &key) { + EXPECT_FALSE(tree_.contains(key)); + EXPECT_EQ(tree_.find(key), const_tree_.end()); + EXPECT_FALSE(const_tree_.contains(key)); + EXPECT_EQ(const_tree_.find(key), tree_.end()); + EXPECT_EQ(tree_.equal_range(key).first, + const_tree_.equal_range(key).second); + } + + iterator lower_bound(const key_type &key) { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + const_iterator lower_bound(const key_type &key) const { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + iterator upper_bound(const key_type &key) { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + const_iterator upper_bound(const key_type &key) const { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + std::pair equal_range(const key_type &key) { + std::pair + checker_res = checker_.equal_range(key); + std::pair tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + std::pair equal_range( + const key_type &key) const { + std::pair + checker_res = checker_.equal_range(key); + 
std::pair tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + iterator find(const key_type &key) { + return iter_check(tree_.find(key), checker_.find(key)); + } + const_iterator find(const key_type &key) const { + return iter_check(tree_.find(key), checker_.find(key)); + } + bool contains(const key_type &key) const { return find(key) != end(); } + size_type count(const key_type &key) const { + size_type res = checker_.count(key); + EXPECT_EQ(res, tree_.count(key)); + return res; + } + + base_checker &operator=(const base_checker &other) { + tree_ = other.tree_; + checker_ = other.checker_; + return *this; + } + + int erase(const key_type &key) { + int size = tree_.size(); + int res = checker_.erase(key); + EXPECT_EQ(res, tree_.count(key)); + EXPECT_EQ(res, tree_.erase(key)); + EXPECT_EQ(tree_.count(key), 0); + EXPECT_EQ(tree_.size(), size - res); + erase_check(key); + return res; + } + iterator erase(iterator iter) { + key_type key = iter.key(); + int size = tree_.size(); + int count = tree_.count(key); + auto checker_iter = checker_.lower_bound(key); + for (iterator tmp(tree_.lower_bound(key)); tmp != iter; ++tmp) { + ++checker_iter; + } + auto checker_next = checker_iter; + ++checker_next; + checker_.erase(checker_iter); + iter = tree_.erase(iter); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - 1); + EXPECT_EQ(tree_.count(key), count - 1); + if (count == 1) { + erase_check(key); + } + return iter_check(iter, checker_next); + } + + void erase(iterator begin, iterator end) { + int size = tree_.size(); + int count = std::distance(begin, end); + auto checker_begin = checker_.lower_bound(begin.key()); + for (iterator tmp(tree_.lower_bound(begin.key())); tmp != begin; ++tmp) { + ++checker_begin; + } + auto checker_end = + end == tree_.end() ? checker_.end() : checker_.lower_bound(end.key()); + if (end != tree_.end()) { + for (iterator tmp(tree_.lower_bound(end.key())); tmp != end; ++tmp) { + ++checker_end; + } + } + const auto checker_ret = checker_.erase(checker_begin, checker_end); + const auto tree_ret = tree_.erase(begin, end); + EXPECT_EQ(std::distance(checker_.begin(), checker_ret), + std::distance(tree_.begin(), tree_ret)); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - count); + } + + void clear() { + tree_.clear(); + checker_.clear(); + } + void swap(base_checker &other) { + tree_.swap(other.tree_); + checker_.swap(other.checker_); + } + + void verify() const { + tree_.verify(); + EXPECT_EQ(tree_.size(), checker_.size()); + + // Move through the forward iterators using increment. + auto checker_iter = checker_.begin(); + const_iterator tree_iter(tree_.begin()); + for (; tree_iter != tree_.end(); ++tree_iter, ++checker_iter) { + CheckPairEquals(*tree_iter, *checker_iter); + } + + // Move through the forward iterators using decrement. + for (int n = tree_.size() - 1; n >= 0; --n) { + iter_check(tree_iter, checker_iter); + --tree_iter; + --checker_iter; + } + EXPECT_EQ(tree_iter, tree_.begin()); + EXPECT_EQ(checker_iter, checker_.begin()); + + // Move through the reverse iterators using increment. + auto checker_riter = checker_.rbegin(); + const_reverse_iterator tree_riter(tree_.rbegin()); + for (; tree_riter != tree_.rend(); ++tree_riter, ++checker_riter) { + CheckPairEquals(*tree_riter, *checker_riter); + } + + // Move through the reverse iterators using decrement. 
+ for (int n = tree_.size() - 1; n >= 0; --n) { + riter_check(tree_riter, checker_riter); + --tree_riter; + --checker_riter; + } + EXPECT_EQ(tree_riter, tree_.rbegin()); + EXPECT_EQ(checker_riter, checker_.rbegin()); + } + + const TreeType &tree() const { return tree_; } + + size_type size() const { + EXPECT_EQ(tree_.size(), checker_.size()); + return tree_.size(); + } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { + EXPECT_EQ(tree_.empty(), checker_.empty()); + return tree_.empty(); + } + + protected: + TreeType tree_; + const TreeType &const_tree_; + CheckerType checker_; +}; + +namespace { +// A checker for unique sorted associative containers. TreeType is expected to +// be btree_{set,map} and CheckerType is expected to be {set,map}. +template +class unique_checker : public base_checker { + using super_type = base_checker; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + unique_checker() : super_type() {} + unique_checker(const unique_checker &other) : super_type(other) {} + template + unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + unique_checker &operator=(const unique_checker &) = default; + + // Insertion routines. + std::pair insert(const value_type &v) { + int size = this->tree_.size(); + std::pair checker_res = + this->checker_.insert(v); + std::pair tree_res = this->tree_.insert(v); + CheckPairEquals(*tree_res.first, *checker_res.first); + EXPECT_EQ(tree_res.second, checker_res.second); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + tree_res.second); + return tree_res; + } + iterator insert(iterator position, const value_type &v) { + int size = this->tree_.size(); + std::pair checker_res = + this->checker_.insert(v); + iterator tree_res = this->tree_.insert(position, v); + CheckPairEquals(*tree_res, *checker_res.first); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + checker_res.second); + return tree_res; + } + template + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +// A checker for multiple sorted associative containers. TreeType is expected +// to be btree_{multiset,multimap} and CheckerType is expected to be +// {multiset,multimap}. +template +class multi_checker : public base_checker { + using super_type = base_checker; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + multi_checker() : super_type() {} + multi_checker(const multi_checker &other) : super_type(other) {} + template + multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + multi_checker &operator=(const multi_checker &) = default; + + // Insertion routines. 
+ iterator insert(const value_type &v) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(v); + iterator tree_res = this->tree_.insert(v); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + iterator insert(iterator position, const value_type &v) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(v); + iterator tree_res = this->tree_.insert(position, v); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + template + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +template +void DoTest(const char *name, T *b, const std::vector &values) { + typename KeyOfValue::type key_of_value; + + T &mutable_b = *b; + const T &const_b = *b; + + // Test insert. + for (int i = 0; i < values.size(); ++i) { + mutable_b.insert(values[i]); + mutable_b.value_check(values[i]); + } + ASSERT_EQ(mutable_b.size(), values.size()); + + const_b.verify(); + + // Test copy constructor. + T b_copy(const_b); + EXPECT_EQ(b_copy.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_copy.find(key_of_value(values[i])), values[i]); + } + + // Test range constructor. + T b_range(const_b.begin(), const_b.end()); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test range insertion for values that already exist. + b_range.insert(b_copy.begin(), b_copy.end()); + b_range.verify(); + + // Test range insertion for new values. + b_range.clear(); + b_range.insert(b_copy.begin(), b_copy.end()); + EXPECT_EQ(b_range.size(), b_copy.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test assignment to self. Nothing should change. + b_range.operator=(b_range); + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test assignment of new values. + b_range.clear(); + b_range = b_copy; + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test swap. + b_range.clear(); + b_range.swap(b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + b_range.swap(b_copy); + + // Test non-member function swap. + swap(b_range, b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + swap(b_range, b_copy); + + // Test erase via values. + for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(key_of_value(values[i])); + // Erasing a non-existent key should have no effect. + ASSERT_EQ(mutable_b.erase(key_of_value(values[i])), 0); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test erase via iterators. + mutable_b = b_copy; + for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(mutable_b.find(key_of_value(values[i]))); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test insert with hint. + for (int i = 0; i < values.size(); i++) { + mutable_b.insert(mutable_b.upper_bound(key_of_value(values[i])), values[i]); + } + + const_b.verify(); + + // Test range erase. 
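+  // (Erasing the full [begin(), end()) range must leave both the tree and
+  // the reference container empty.)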
+ mutable_b.erase(mutable_b.begin(), mutable_b.end()); + EXPECT_EQ(mutable_b.size(), 0); + const_b.verify(); + + // First half. + mutable_b = b_copy; + typename T::iterator mutable_iter_end = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_b.begin(), mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 2); + const_b.verify(); + + // Second half. + mutable_b = b_copy; + typename T::iterator mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_begin; + mutable_b.erase(mutable_iter_begin, mutable_b.end()); + EXPECT_EQ(mutable_b.size(), values.size() / 2); + const_b.verify(); + + // Second quarter. + mutable_b = b_copy; + mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_begin; + mutable_iter_end = mutable_iter_begin; + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_iter_begin, mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 4); + const_b.verify(); + + mutable_b.clear(); +} + +template +void ConstTest() { + using value_type = typename T::value_type; + typename KeyOfValue::type key_of_value; + + T mutable_b; + const T &const_b = mutable_b; + + // Insert a single value into the container and test looking it up. + value_type value = Generator(2)(2); + mutable_b.insert(value); + EXPECT_TRUE(mutable_b.contains(key_of_value(value))); + EXPECT_NE(mutable_b.find(key_of_value(value)), const_b.end()); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_NE(const_b.find(key_of_value(value)), mutable_b.end()); + EXPECT_EQ(*const_b.lower_bound(key_of_value(value)), value); + EXPECT_EQ(const_b.upper_bound(key_of_value(value)), const_b.end()); + EXPECT_EQ(*const_b.equal_range(key_of_value(value)).first, value); + + // We can only create a non-const iterator from a non-const container. + typename T::iterator mutable_iter(mutable_b.begin()); + EXPECT_EQ(mutable_iter, const_b.begin()); + EXPECT_NE(mutable_iter, const_b.end()); + EXPECT_EQ(const_b.begin(), mutable_iter); + EXPECT_NE(const_b.end(), mutable_iter); + typename T::reverse_iterator mutable_riter(mutable_b.rbegin()); + EXPECT_EQ(mutable_riter, const_b.rbegin()); + EXPECT_NE(mutable_riter, const_b.rend()); + EXPECT_EQ(const_b.rbegin(), mutable_riter); + EXPECT_NE(const_b.rend(), mutable_riter); + + // We can create a const iterator from a non-const iterator. + typename T::const_iterator const_iter(mutable_iter); + EXPECT_EQ(const_iter, mutable_b.begin()); + EXPECT_NE(const_iter, mutable_b.end()); + EXPECT_EQ(mutable_b.begin(), const_iter); + EXPECT_NE(mutable_b.end(), const_iter); + typename T::const_reverse_iterator const_riter(mutable_riter); + EXPECT_EQ(const_riter, mutable_b.rbegin()); + EXPECT_NE(const_riter, mutable_b.rend()); + EXPECT_EQ(mutable_b.rbegin(), const_riter); + EXPECT_NE(mutable_b.rend(), const_riter); + + // Make sure various methods can be invoked on a const container. 
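+  // (Everything below goes through const_b, so this block compiles only if
+  // the container provides const overloads of these methods.)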
+ const_b.verify(); + ASSERT_TRUE(!const_b.empty()); + EXPECT_EQ(const_b.size(), 1); + EXPECT_GT(const_b.max_size(), 0); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_EQ(const_b.count(key_of_value(value)), 1); +} + +template +void BtreeTest() { + ConstTest(); + + using V = typename remove_pair_const::type; + const std::vector random_values = GenerateValuesWithSeed( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + GTEST_FLAG_GET(random_seed)); + + unique_checker container; + + // Test key insertion/deletion in sorted order. + std::vector sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test key insertion/deletion in random order. + DoTest("random: ", &container, random_values); +} + +template +void BtreeMultiTest() { + ConstTest(); + + using V = typename remove_pair_const::type; + const std::vector random_values = GenerateValuesWithSeed( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + GTEST_FLAG_GET(random_seed)); + + multi_checker container; + + // Test keys in sorted order. + std::vector sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test keys in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test keys in random order. + DoTest("random: ", &container, random_values); + + // Test keys in random order w/ duplicates. + std::vector duplicate_values(random_values); + duplicate_values.insert(duplicate_values.end(), random_values.begin(), + random_values.end()); + DoTest("duplicates:", &container, duplicate_values); + + // Test all identical keys. + std::vector identical_values(100); + std::fill(identical_values.begin(), identical_values.end(), + Generator(2)(2)); + DoTest("identical: ", &container, identical_values); +} + +template +void BtreeMapTest() { + using value_type = typename T::value_type; + using mapped_type = typename T::mapped_type; + + mapped_type m = Generator(0)(0); + (void)m; + + T b; + + // Verify we can insert using operator[]. + for (int i = 0; i < 1000; i++) { + value_type v = Generator(1000)(i); + b[v.first] = v.second; + } + EXPECT_EQ(b.size(), 1000); + + // Test whether we can use the "->" operator on iterators and + // reverse_iterators. This stresses the btree_map_params::pair_pointer + // mechanism. 
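+  // (If operator-> returned a pointer into a short-lived temporary pair, the
+  // .first/.second reads below would touch destroyed storage. The checks also
+  // rely on Generator being deterministic for a given (size, index) pair.)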
+  EXPECT_EQ(b.begin()->first, Generator<value_type>(1000)(0).first);
+  EXPECT_EQ(b.begin()->second, Generator<value_type>(1000)(0).second);
+  EXPECT_EQ(b.rbegin()->first, Generator<value_type>(1000)(999).first);
+  EXPECT_EQ(b.rbegin()->second, Generator<value_type>(1000)(999).second);
+}
+
+template <typename T>
+void BtreeMultiMapTest() {
+  using mapped_type = typename T::mapped_type;
+  mapped_type m = Generator<mapped_type>(0)(0);
+  (void)m;
+}
+
+template <typename K>
+void SetTest() {
+  EXPECT_EQ(
+      sizeof(absl::btree_set<K>),
+      2 * sizeof(void *) + sizeof(typename absl::btree_set<K>::size_type));
+  using BtreeSet = absl::btree_set<K>;
+  BtreeTest<BtreeSet, std::set<K>>();
+}
+
+template <typename K>
+void MapTest() {
+  EXPECT_EQ(
+      sizeof(absl::btree_map<K, K>),
+      2 * sizeof(void *) + sizeof(typename absl::btree_map<K, K>::size_type));
+  using BtreeMap = absl::btree_map<K, K>;
+  BtreeTest<BtreeMap, std::map<K, K>>();
+  BtreeMapTest<BtreeMap>();
+}
+
+TEST(Btree, set_int32) { SetTest<int32_t>(); }
+TEST(Btree, set_string) { SetTest<std::string>(); }
+TEST(Btree, set_cord) { SetTest<absl::Cord>(); }
+TEST(Btree, map_int32) { MapTest<int32_t>(); }
+TEST(Btree, map_string) { MapTest<std::string>(); }
+TEST(Btree, map_cord) { MapTest<absl::Cord>(); }
+
+template <typename K>
+void MultiSetTest() {
+  EXPECT_EQ(
+      sizeof(absl::btree_multiset<K>),
+      2 * sizeof(void *) +
+          sizeof(typename absl::btree_multiset<K>::size_type));
+  using BtreeMSet = absl::btree_multiset<K>;
+  BtreeMultiTest<BtreeMSet, std::multiset<K>>();
+}
+
+template <typename K>
+void MultiMapTest() {
+  EXPECT_EQ(sizeof(absl::btree_multimap<K, K>),
+            2 * sizeof(void *) +
+                sizeof(typename absl::btree_multimap<K, K>::size_type));
+  using BtreeMMap = absl::btree_multimap<K, K>;
+  BtreeMultiTest<BtreeMMap, std::multimap<K, K>>();
+  BtreeMultiMapTest<BtreeMMap>();
+}
+
+TEST(Btree, multiset_int32) { MultiSetTest<int32_t>(); }
+TEST(Btree, multiset_string) { MultiSetTest<std::string>(); }
+TEST(Btree, multiset_cord) { MultiSetTest<absl::Cord>(); }
+TEST(Btree, multimap_int32) { MultiMapTest<int32_t>(); }
+TEST(Btree, multimap_string) { MultiMapTest<std::string>(); }
+TEST(Btree, multimap_cord) { MultiMapTest<absl::Cord>(); }
+
+struct CompareIntToString {
+  bool operator()(const std::string &a, const std::string &b) const {
+    return a < b;
+  }
+  bool operator()(const std::string &a, int b) const {
+    return a < absl::StrCat(b);
+  }
+  bool operator()(int a, const std::string &b) const {
+    return absl::StrCat(a) < b;
+  }
+  using is_transparent = void;
+};
+
+struct NonTransparentCompare {
+  template <typename T, typename U>
+  bool operator()(const T &t, const U &u) const {
+    // Treating all comparators as transparent can cause inefficiencies (see
+    // N3657 C++ proposal). Test that for comparators without 'is_transparent'
+    // alias (like this one), we do not attempt heterogeneous lookup.
+    EXPECT_TRUE((std::is_same<T, U>()));
+    return t < u;
+  }
+};
+
+template <typename T>
+bool CanEraseWithEmptyBrace(T t, decltype(t.erase({})) *) {
+  return true;
+}
+
+template <typename T>
+bool CanEraseWithEmptyBrace(T, ...)
{
+  return false;
+}
+
+template <typename T>
+void TestHeterogeneous(T table) {
+  auto lb = table.lower_bound("3");
+  EXPECT_EQ(lb, table.lower_bound(3));
+  EXPECT_NE(lb, table.lower_bound(4));
+  EXPECT_EQ(lb, table.lower_bound({"3"}));
+  EXPECT_NE(lb, table.lower_bound({}));
+
+  auto ub = table.upper_bound("3");
+  EXPECT_EQ(ub, table.upper_bound(3));
+  EXPECT_NE(ub, table.upper_bound(5));
+  EXPECT_EQ(ub, table.upper_bound({"3"}));
+  EXPECT_NE(ub, table.upper_bound({}));
+
+  auto er = table.equal_range("3");
+  EXPECT_EQ(er, table.equal_range(3));
+  EXPECT_NE(er, table.equal_range(4));
+  EXPECT_EQ(er, table.equal_range({"3"}));
+  EXPECT_NE(er, table.equal_range({}));
+
+  auto it = table.find("3");
+  EXPECT_EQ(it, table.find(3));
+  EXPECT_NE(it, table.find(4));
+  EXPECT_EQ(it, table.find({"3"}));
+  EXPECT_NE(it, table.find({}));
+
+  EXPECT_TRUE(table.contains(3));
+  EXPECT_FALSE(table.contains(4));
+  EXPECT_TRUE(table.contains({"3"}));
+  EXPECT_FALSE(table.contains({}));
+
+  EXPECT_EQ(1, table.count(3));
+  EXPECT_EQ(0, table.count(4));
+  EXPECT_EQ(1, table.count({"3"}));
+  EXPECT_EQ(0, table.count({}));
+
+  auto copy = table;
+  copy.erase(3);
+  EXPECT_EQ(table.size() - 1, copy.size());
+  copy.erase(4);
+  EXPECT_EQ(table.size() - 1, copy.size());
+  copy.erase({"5"});
+  EXPECT_EQ(table.size() - 2, copy.size());
+  EXPECT_FALSE(CanEraseWithEmptyBrace(table, nullptr));
+
+  // Also run it with const T&.
+  if (std::is_class<T>()) TestHeterogeneous<const T &>(table);
+}
+
+TEST(Btree, HeterogeneousLookup) {
+  TestHeterogeneous(btree_set<std::string, CompareIntToString>{"1", "3", "5"});
+  TestHeterogeneous(btree_map<std::string, int, CompareIntToString>{
+      {"1", 1}, {"3", 3}, {"5", 5}});
+  TestHeterogeneous(
+      btree_multiset<std::string, CompareIntToString>{"1", "3", "5"});
+  TestHeterogeneous(btree_multimap<std::string, int, CompareIntToString>{
+      {"1", 1}, {"3", 3}, {"5", 5}});
+
+  // Only maps have .at()
+  btree_map<std::string, int, CompareIntToString> map{
+      {"", -1}, {"1", 1}, {"3", 3}, {"5", 5}};
+  EXPECT_EQ(1, map.at(1));
+  EXPECT_EQ(3, map.at({"3"}));
+  EXPECT_EQ(-1, map.at({}));
+  const auto &cmap = map;
+  EXPECT_EQ(1, cmap.at(1));
+  EXPECT_EQ(3, cmap.at({"3"}));
+  EXPECT_EQ(-1, cmap.at({}));
+}
+
+TEST(Btree, NoHeterogeneousLookupWithoutAlias) {
+  using StringSet = absl::btree_set<std::string, NonTransparentCompare>;
+  StringSet s;
+  ASSERT_TRUE(s.insert("hello").second);
+  ASSERT_TRUE(s.insert("world").second);
+  EXPECT_TRUE(s.end() == s.find("blah"));
+  EXPECT_TRUE(s.begin() == s.lower_bound("hello"));
+  EXPECT_EQ(1, s.count("world"));
+  EXPECT_TRUE(s.contains("hello"));
+  EXPECT_TRUE(s.contains("world"));
+  EXPECT_FALSE(s.contains("blah"));
+
+  using StringMultiSet =
+      absl::btree_multiset<std::string, NonTransparentCompare>;
+  StringMultiSet ms;
+  ms.insert("hello");
+  ms.insert("world");
+  ms.insert("world");
+  EXPECT_TRUE(ms.end() == ms.find("blah"));
+  EXPECT_TRUE(ms.begin() == ms.lower_bound("hello"));
+  EXPECT_EQ(2, ms.count("world"));
+  EXPECT_TRUE(ms.contains("hello"));
+  EXPECT_TRUE(ms.contains("world"));
+  EXPECT_FALSE(ms.contains("blah"));
+}
+
+TEST(Btree, DefaultTransparent) {
+  {
+    // `int` does not have a default transparent comparator.
+    // The input value is converted to key_type.
+    btree_set<int> s = {1};
+    double d = 1.1;
+    EXPECT_EQ(s.begin(), s.find(d));
+    EXPECT_TRUE(s.contains(d));
+  }
+
+  {
+    // `std::string` has heterogeneous support.
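+    // (The default std::less<std::string> comparator is string-adapted
+    // internally, so the string_view lookups below avoid constructing a
+    // temporary std::string; see the KeyCompareAdapter test further down.)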
+    btree_set<std::string> s = {"A"};
+    EXPECT_EQ(s.begin(), s.find(absl::string_view("A")));
+    EXPECT_TRUE(s.contains(absl::string_view("A")));
+  }
+}
+
+class StringLike {
+ public:
+  StringLike() = default;
+
+  StringLike(const char *s) : s_(s) {  // NOLINT
+    ++constructor_calls_;
+  }
+
+  bool operator<(const StringLike &a) const { return s_ < a.s_; }
+
+  static void clear_constructor_call_count() { constructor_calls_ = 0; }
+
+  static int constructor_calls() { return constructor_calls_; }
+
+ private:
+  static int constructor_calls_;
+  std::string s_;
+};
+
+int StringLike::constructor_calls_ = 0;
+
+TEST(Btree, HeterogeneousLookupDoesntDegradePerformance) {
+  using StringSet = absl::btree_set<StringLike>;
+  StringSet s;
+  for (int i = 0; i < 100; ++i) {
+    ASSERT_TRUE(s.insert(absl::StrCat(i).c_str()).second);
+  }
+  StringLike::clear_constructor_call_count();
+  s.find("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.contains("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.count("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.lower_bound("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.upper_bound("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.equal_range("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.erase("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+}
+
+// Verify that swapping btrees swaps the key comparison functors and that we
+// can use non-default constructible comparators.
+struct SubstringLess {
+  SubstringLess() = delete;
+  explicit SubstringLess(int length) : n(length) {}
+  bool operator()(const std::string &a, const std::string &b) const {
+    return absl::string_view(a).substr(0, n) <
+           absl::string_view(b).substr(0, n);
+  }
+  int n;
+};
+
+TEST(Btree, SwapKeyCompare) {
+  using SubstringSet = absl::btree_set<std::string, SubstringLess>;
+  SubstringSet s1(SubstringLess(1), SubstringSet::allocator_type());
+  SubstringSet s2(SubstringLess(2), SubstringSet::allocator_type());
+
+  ASSERT_TRUE(s1.insert("a").second);
+  ASSERT_FALSE(s1.insert("aa").second);
+
+  ASSERT_TRUE(s2.insert("a").second);
+  ASSERT_TRUE(s2.insert("aa").second);
+  ASSERT_FALSE(s2.insert("aaa").second);
+
+  swap(s1, s2);
+
+  ASSERT_TRUE(s1.insert("b").second);
+  ASSERT_TRUE(s1.insert("bb").second);
+  ASSERT_FALSE(s1.insert("bbb").second);
+
+  ASSERT_TRUE(s2.insert("b").second);
+  ASSERT_FALSE(s2.insert("bb").second);
+}
+
+TEST(Btree, UpperBoundRegression) {
+  // Regress a bug where upper_bound would default-construct a new key_compare
+  // instead of copying the existing one.
+  using SubstringSet = absl::btree_set<std::string, SubstringLess>;
+  SubstringSet my_set(SubstringLess(3));
+  my_set.insert("aab");
+  my_set.insert("abb");
+  // We call upper_bound("aaa"). If this correctly uses the length 3
+  // comparator, aaa < aab < abb, so we should get aab as the result.
+  // If it instead uses the default-constructed length 2 comparator,
+  // aa == aa < ab, so we'll get abb as our result.
+ SubstringSet::iterator it = my_set.upper_bound("aaa"); + ASSERT_TRUE(it != my_set.end()); + EXPECT_EQ("aab", *it); +} + +TEST(Btree, Comparison) { + const int kSetSize = 1201; + absl::btree_set my_set; + for (int i = 0; i < kSetSize; ++i) { + my_set.insert(i); + } + absl::btree_set my_set_copy(my_set); + EXPECT_TRUE(my_set_copy == my_set); + EXPECT_TRUE(my_set == my_set_copy); + EXPECT_FALSE(my_set_copy != my_set); + EXPECT_FALSE(my_set != my_set_copy); + + my_set.insert(kSetSize); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + my_set.erase(kSetSize - 1); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + absl::btree_map my_map; + for (int i = 0; i < kSetSize; ++i) { + my_map[std::string(i, 'a')] = i; + } + absl::btree_map my_map_copy(my_map); + EXPECT_TRUE(my_map_copy == my_map); + EXPECT_TRUE(my_map == my_map_copy); + EXPECT_FALSE(my_map_copy != my_map); + EXPECT_FALSE(my_map != my_map_copy); + + ++my_map_copy[std::string(7, 'a')]; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map_copy = my_map; + my_map["hello"] = kSetSize; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map.erase(std::string(kSetSize - 1, 'a')); + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); +} + +TEST(Btree, RangeCtorSanity) { + std::vector ivec; + ivec.push_back(1); + std::map imap; + imap.insert(std::make_pair(1, 2)); + absl::btree_multiset tmset(ivec.begin(), ivec.end()); + absl::btree_multimap tmmap(imap.begin(), imap.end()); + absl::btree_set tset(ivec.begin(), ivec.end()); + absl::btree_map tmap(imap.begin(), imap.end()); + EXPECT_EQ(1, tmset.size()); + EXPECT_EQ(1, tmmap.size()); + EXPECT_EQ(1, tset.size()); + EXPECT_EQ(1, tmap.size()); +} + +} // namespace + +class BtreeNodePeer { + public: + // Yields the size of a leaf node with a specific number of values. + template + constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) { + return btree_node< + set_params, std::allocator, + /*TargetNodeSize=*/256, // This parameter isn't used here. + /*Multi=*/false>>::SizeWithNSlots(target_values_per_node); + } + + // Yields the number of slots in a (non-root) leaf node for this btree. 
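+  // (This is kNodeSlots, the fixed capacity of non-root leaf nodes; the root
+  // leaf may be allocated smaller.)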
+ template + constexpr static size_t GetNumSlotsPerNode() { + return btree_node::kNodeSlots; + } + + template + constexpr static size_t GetMaxFieldType() { + return std::numeric_limits< + typename btree_node::field_type>::max(); + } + + template + constexpr static bool UsesLinearNodeSearch() { + return btree_node::use_linear_search::value; + } + + template + constexpr static bool FieldTypeEqualsSlotType() { + return std::is_same< + typename btree_node::field_type, + typename btree_node::slot_type>::value; + } +}; + +namespace { + +class BtreeMapTest : public ::testing::Test { + public: + struct Key {}; + struct Cmp { + template + bool operator()(T, T) const { + return false; + } + }; + + struct KeyLin { + using absl_btree_prefer_linear_node_search = std::true_type; + }; + struct CmpLin : Cmp { + using absl_btree_prefer_linear_node_search = std::true_type; + }; + + struct KeyBin { + using absl_btree_prefer_linear_node_search = std::false_type; + }; + struct CmpBin : Cmp { + using absl_btree_prefer_linear_node_search = std::false_type; + }; + + template + static bool IsLinear() { + return BtreeNodePeer::UsesLinearNodeSearch>(); + } +}; + +TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) { + // Test requesting linear search by directly exporting an alias. + EXPECT_FALSE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); +} + +TEST_F(BtreeMapTest, LinearChoiceTree) { + // Cmp has precedence, and is forcing binary + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + // Cmp has precedence, and is forcing linear + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + // Cmp has no preference, Key determines linear vs binary. + EXPECT_FALSE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_FALSE((IsLinear())); + // arithmetic key w/ std::less or std::greater: linear + EXPECT_TRUE((IsLinear>())); + EXPECT_TRUE((IsLinear>())); + // arithmetic key w/ custom compare: binary + EXPECT_FALSE((IsLinear())); + // non-arithmetic key: binary + EXPECT_FALSE((IsLinear>())); +} + +TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { + absl::btree_map> m; + + std::unique_ptr &v = m["A"]; + EXPECT_TRUE(v == nullptr); + v = absl::make_unique("X"); + + auto iter = m.find("A"); + EXPECT_EQ("X", *iter->second); +} + +TEST(Btree, InitializerListConstructor) { + absl::btree_set set({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset mset({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map map({{1, 5}, {2, 10}}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + + absl::btree_multimap mmap({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +TEST(Btree, InitializerListInsert) { + absl::btree_set set; + set.insert({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset mset; + mset.insert({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map map; + map.insert({{1, 5}, {2, 10}}); + // Test that inserting one element using an initializer list also works. 
+ map.insert({3, 15}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + EXPECT_EQ(map[3], 15); + + absl::btree_multimap mmap; + mmap.insert({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +template +void AssertKeyCompareStringAdapted() { + using Adapted = typename key_compare_adapter::type; + static_assert( + std::is_same::value || + std::is_same::value, + "key_compare_adapter should have string-adapted this comparator."); +} +template +void AssertKeyCompareNotStringAdapted() { + using Adapted = typename key_compare_adapter::type; + static_assert( + !std::is_same::value && + !std::is_same::value, + "key_compare_adapter shouldn't have string-adapted this comparator."); +} + +TEST(Btree, KeyCompareAdapter) { + AssertKeyCompareStringAdapted, std::string>(); + AssertKeyCompareStringAdapted, std::string>(); + AssertKeyCompareStringAdapted, + absl::string_view>(); + AssertKeyCompareStringAdapted, + absl::string_view>(); + AssertKeyCompareStringAdapted, absl::Cord>(); + AssertKeyCompareStringAdapted, absl::Cord>(); + AssertKeyCompareNotStringAdapted, int>(); + AssertKeyCompareNotStringAdapted, int>(); +} + +TEST(Btree, RValueInsert) { + InstanceTracker tracker; + + absl::btree_set set; + set.insert(MovableOnlyInstance(1)); + set.insert(MovableOnlyInstance(3)); + MovableOnlyInstance two(2); + set.insert(set.find(MovableOnlyInstance(3)), std::move(two)); + auto it = set.find(MovableOnlyInstance(2)); + ASSERT_NE(it, set.end()); + ASSERT_NE(++it, set.end()); + EXPECT_EQ(it->value(), 3); + + absl::btree_multiset mset; + MovableOnlyInstance zero(0); + MovableOnlyInstance zero2(0); + mset.insert(std::move(zero)); + mset.insert(mset.find(MovableOnlyInstance(0)), std::move(zero2)); + EXPECT_EQ(mset.count(MovableOnlyInstance(0)), 2); + + absl::btree_map map; + std::pair p1 = {1, MovableOnlyInstance(5)}; + std::pair p2 = {2, MovableOnlyInstance(10)}; + std::pair p3 = {3, MovableOnlyInstance(15)}; + map.insert(std::move(p1)); + map.insert(std::move(p3)); + map.insert(map.find(3), std::move(p2)); + ASSERT_NE(map.find(2), map.end()); + EXPECT_EQ(map.find(2)->second.value(), 10); + + absl::btree_multimap mmap; + std::pair p4 = {1, MovableOnlyInstance(5)}; + std::pair p5 = {1, MovableOnlyInstance(10)}; + mmap.insert(std::move(p4)); + mmap.insert(mmap.find(1), std::move(p5)); + auto range = mmap.equal_range(1); + auto it1 = range.first; + ASSERT_NE(it1, range.second); + EXPECT_EQ(it1->second.value(), 10); + ASSERT_NE(++it1, range.second); + EXPECT_EQ(it1->second.value(), 5); + EXPECT_EQ(++it1, range.second); + + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.swaps(), 0); +} + +template +struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase { + using Cmp::Cmp; + CheckedCompareOptedOutCmp() {} + CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {} // NOLINT +}; + +// A btree set with a specific number of values per node. Opt out of +// checked_compare so that we can expect exact numbers of comparisons. 
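+// (checked_compare is a debug wrapper that performs extra comparisons to
+// verify comparator consistency; leaving it enabled would perturb the exact
+// comparison counts asserted in ExpectOperationCounts below.)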
+template > +class SizedBtreeSet + : public btree_set_container, std::allocator, + BtreeNodePeer::GetTargetNodeSize(TargetValuesPerNode), + /*Multi=*/false>>> { + using Base = typename SizedBtreeSet::btree_set_container; + + public: + SizedBtreeSet() = default; + using Base::Base; +}; + +template +void ExpectOperationCounts(const int expected_moves, + const int expected_comparisons, + const std::vector &values, + InstanceTracker *tracker, Set *set) { + for (const int v : values) set->insert(MovableOnlyInstance(v)); + set->clear(); + EXPECT_EQ(tracker->moves(), expected_moves); + EXPECT_EQ(tracker->comparisons(), expected_comparisons); + EXPECT_EQ(tracker->copies(), 0); + EXPECT_EQ(tracker->swaps(), 0); + tracker->ResetCopiesMovesSwaps(); +} + +#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_HWADDRESS_SANITIZER) +constexpr bool kAsan = true; +#else +constexpr bool kAsan = false; +#endif + +// Note: when the values in this test change, it is expected to have an impact +// on performance. +TEST(Btree, MovesComparisonsCopiesSwapsTracking) { + if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode."; + + InstanceTracker tracker; + // Note: this is minimum number of values per node. + SizedBtreeSet set4; + // Note: this is the default number of values per node for a set of int32s + // (with 64-bit pointers). + SizedBtreeSet set61; + SizedBtreeSet set100; + + // Don't depend on flags for random values because then the expectations will + // fail if the flags change. + std::vector values = + GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); + + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); + if (sizeof(void *) == 8) { + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), + // When we have generations, there is one fewer slot. + BtreeGenerationsEnabled() ? 60 : 61); + } + + // Test key insertion/deletion in random order. + ExpectOperationCounts(56540, 134212, values, &tracker, &set4); + ExpectOperationCounts(386718, 129807, values, &tracker, &set61); + ExpectOperationCounts(586761, 130310, values, &tracker, &set100); + + // Test key insertion/deletion in sorted order. + std::sort(values.begin(), values.end()); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); + ExpectOperationCounts(20208, 87757, values, &tracker, &set61); + ExpectOperationCounts(20124, 96583, values, &tracker, &set100); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(values.begin(), values.end()); + ExpectOperationCounts(54949, 127531, values, &tracker, &set4); + ExpectOperationCounts(338813, 118266, values, &tracker, &set61); + ExpectOperationCounts(534529, 125279, values, &tracker, &set100); +} + +struct MovableOnlyInstanceThreeWayCompare { + absl::weak_ordering operator()(const MovableOnlyInstance &a, + const MovableOnlyInstance &b) const { + return a.compare(b); + } +}; + +// Note: when the values in this test change, it is expected to have an impact +// on performance. +TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { + if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode."; + + InstanceTracker tracker; + // Note: this is minimum number of values per node. + SizedBtreeSet + set4; + // Note: this is the default number of values per node for a set of int32s + // (with 64-bit pointers). 
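+  // (61 follows from the default 256-byte target node size divided among
+  // 4-byte int32 slots after node overhead; with generations enabled one slot
+  // is given up, hence the 60-vs-61 check below.)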
+ SizedBtreeSet + set61; + SizedBtreeSet + set100; + + // Don't depend on flags for random values because then the expectations will + // fail if the flags change. + std::vector values = + GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); + + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); + if (sizeof(void *) == 8) { + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), + // When we have generations, there is one fewer slot. + BtreeGenerationsEnabled() ? 60 : 61); + } + + // Test key insertion/deletion in random order. + ExpectOperationCounts(56540, 124221, values, &tracker, &set4); + ExpectOperationCounts(386718, 119816, values, &tracker, &set61); + ExpectOperationCounts(586761, 120319, values, &tracker, &set100); + + // Test key insertion/deletion in sorted order. + std::sort(values.begin(), values.end()); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); + ExpectOperationCounts(20208, 87757, values, &tracker, &set61); + ExpectOperationCounts(20124, 96583, values, &tracker, &set100); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(values.begin(), values.end()); + ExpectOperationCounts(54949, 117532, values, &tracker, &set4); + ExpectOperationCounts(338813, 108267, values, &tracker, &set61); + ExpectOperationCounts(534529, 115280, values, &tracker, &set100); +} + +struct NoDefaultCtor { + int num; + explicit NoDefaultCtor(int i) : num(i) {} + + friend bool operator<(const NoDefaultCtor &a, const NoDefaultCtor &b) { + return a.num < b.num; + } +}; + +TEST(Btree, BtreeMapCanHoldNoDefaultCtorTypes) { + absl::btree_map m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + EXPECT_TRUE(m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)).second); + } + EXPECT_FALSE(m.emplace(NoDefaultCtor(78), NoDefaultCtor(0)).second); + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, BtreeMultimapCanHoldNoDefaultCtorTypes) { + absl::btree_multimap m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)); + } + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, MapAt) { + absl::btree_map map = {{1, 2}, {2, 4}}; + EXPECT_EQ(map.at(1), 2); + EXPECT_EQ(map.at(2), 4); + map.at(2) = 8; + const absl::btree_map &const_map = map; + EXPECT_EQ(const_map.at(1), 2); + EXPECT_EQ(const_map.at(2), 8); +#ifdef ABSL_HAVE_EXCEPTIONS + EXPECT_THROW(map.at(3), std::out_of_range); +#else + EXPECT_DEATH_IF_SUPPORTED(map.at(3), "absl::btree_map::at"); +#endif +} + +TEST(Btree, BtreeMultisetEmplace) { + const int value_to_insert = 123456; + absl::btree_multiset s; + auto iter = s.emplace(value_to_insert); + 
ASSERT_NE(iter, s.end());
+  EXPECT_EQ(*iter, value_to_insert);
+  iter = s.emplace(value_to_insert);
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(*iter, value_to_insert);
+  auto result = s.equal_range(value_to_insert);
+  EXPECT_EQ(std::distance(result.first, result.second), 2);
+}
+
+TEST(Btree, BtreeMultisetEmplaceHint) {
+  const int value_to_insert = 123456;
+  absl::btree_multiset<int> s;
+  auto iter = s.emplace(value_to_insert);
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(*iter, value_to_insert);
+  iter = s.emplace_hint(iter, value_to_insert);
+  // The new element should be before the previously inserted one.
+  EXPECT_EQ(iter, s.lower_bound(value_to_insert));
+  ASSERT_NE(iter, s.end());
+  EXPECT_EQ(*iter, value_to_insert);
+}
+
+TEST(Btree, BtreeMultimapEmplace) {
+  const int key_to_insert = 123456;
+  const char value0[] = "a";
+  absl::btree_multimap<int, std::string> m;
+  auto iter = m.emplace(key_to_insert, value0);
+  ASSERT_NE(iter, m.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value0);
+  const char value1[] = "b";
+  iter = m.emplace(key_to_insert, value1);
+  ASSERT_NE(iter, m.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value1);
+  auto result = m.equal_range(key_to_insert);
+  EXPECT_EQ(std::distance(result.first, result.second), 2);
+}
+
+TEST(Btree, BtreeMultimapEmplaceHint) {
+  const int key_to_insert = 123456;
+  const char value0[] = "a";
+  absl::btree_multimap<int, std::string> m;
+  auto iter = m.emplace(key_to_insert, value0);
+  ASSERT_NE(iter, m.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value0);
+  const char value1[] = "b";
+  iter = m.emplace_hint(iter, key_to_insert, value1);
+  // The new element should be before the previously inserted one.
+  EXPECT_EQ(iter, m.lower_bound(key_to_insert));
+  ASSERT_NE(iter, m.end());
+  EXPECT_EQ(iter->first, key_to_insert);
+  EXPECT_EQ(iter->second, value1);
+}
+
+TEST(Btree, ConstIteratorAccessors) {
+  absl::btree_set<int> set;
+  for (int i = 0; i < 100; ++i) {
+    set.insert(i);
+  }
+
+  auto it = set.cbegin();
+  auto r_it = set.crbegin();
+  for (int i = 0; i < 100; ++i, ++it, ++r_it) {
+    ASSERT_EQ(*it, i);
+    ASSERT_EQ(*r_it, 99 - i);
+  }
+  EXPECT_EQ(it, set.cend());
+  EXPECT_EQ(r_it, set.crend());
+}
+
+TEST(Btree, StrSplitCompatible) {
+  const absl::btree_set<std::string> split_set = absl::StrSplit("a,b,c", ',');
+  const absl::btree_set<std::string> expected_set = {"a", "b", "c"};
+
+  EXPECT_EQ(split_set, expected_set);
+}
+
+TEST(Btree, KeyComp) {
+  absl::btree_set<int> s;
+  EXPECT_TRUE(s.key_comp()(1, 2));
+  EXPECT_FALSE(s.key_comp()(2, 2));
+  EXPECT_FALSE(s.key_comp()(2, 1));
+
+  absl::btree_map<int, int> m1;
+  EXPECT_TRUE(m1.key_comp()(1, 2));
+  EXPECT_FALSE(m1.key_comp()(2, 2));
+  EXPECT_FALSE(m1.key_comp()(2, 1));
+
+  // Even though we internally adapt the comparator of `m2` to be three-way and
+  // heterogeneous, the comparator we expose through key_comp() is the original
+  // unadapted comparator.
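+  // (Concretely: the calls below must return bool; they would not compile if
+  // key_comp() exposed the adapted comparator, whose result is a weak
+  // ordering rather than bool.)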
+ absl::btree_map m2; + EXPECT_TRUE(m2.key_comp()("a", "b")); + EXPECT_FALSE(m2.key_comp()("b", "b")); + EXPECT_FALSE(m2.key_comp()("b", "a")); +} + +TEST(Btree, ValueComp) { + absl::btree_set s; + EXPECT_TRUE(s.value_comp()(1, 2)); + EXPECT_FALSE(s.value_comp()(2, 2)); + EXPECT_FALSE(s.value_comp()(2, 1)); + + absl::btree_map m1; + EXPECT_TRUE(m1.value_comp()(std::make_pair(1, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0))); + + // Even though we internally adapt the comparator of `m2` to be three-way and + // heterogeneous, the comparator we expose through value_comp() is based on + // the original unadapted comparator. + absl::btree_map m2; + EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0))); + EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0))); + EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0))); +} + +// Test that we have the protected members from the std::map::value_compare API. +// See https://en.cppreference.com/w/cpp/container/map/value_compare. +TEST(Btree, MapValueCompProtected) { + struct key_compare { + bool operator()(int l, int r) const { return l < r; } + int id; + }; + using value_compare = absl::btree_map::value_compare; + struct value_comp_child : public value_compare { + explicit value_comp_child(key_compare kc) : value_compare(kc) {} + int GetId() const { return comp.id; } + }; + value_comp_child c(key_compare{10}); + EXPECT_EQ(c.GetId(), 10); +} + +TEST(Btree, DefaultConstruction) { + absl::btree_set s; + absl::btree_map m; + absl::btree_multiset ms; + absl::btree_multimap mm; + + EXPECT_TRUE(s.empty()); + EXPECT_TRUE(m.empty()); + EXPECT_TRUE(ms.empty()); + EXPECT_TRUE(mm.empty()); +} + +TEST(Btree, SwissTableHashable) { + static constexpr int kValues = 10000; + std::vector values(kValues); + std::iota(values.begin(), values.end(), 0); + std::vector> map_values; + for (int v : values) map_values.emplace_back(v, -v); + + using set = absl::btree_set; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + set{}, + set{1}, + set{2}, + set{1, 2}, + set{2, 1}, + set(values.begin(), values.end()), + set(values.rbegin(), values.rend()), + })); + + using mset = absl::btree_multiset; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + mset{}, + mset{1}, + mset{1, 1}, + mset{2}, + mset{2, 2}, + mset{1, 2}, + mset{1, 1, 2}, + mset{1, 2, 2}, + mset{1, 1, 2, 2}, + mset(values.begin(), values.end()), + mset(values.rbegin(), values.rend()), + })); + + using map = absl::btree_map; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + map{}, + map{{1, 0}}, + map{{1, 1}}, + map{{2, 0}}, + map{{2, 2}}, + map{{1, 0}, {2, 1}}, + map(map_values.begin(), map_values.end()), + map(map_values.rbegin(), map_values.rend()), + })); + + using mmap = absl::btree_multimap; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + mmap{}, + mmap{{1, 0}}, + mmap{{1, 1}}, + mmap{{1, 0}, {1, 1}}, + mmap{{1, 1}, {1, 0}}, + mmap{{2, 0}}, + mmap{{2, 2}}, + mmap{{1, 0}, {2, 1}}, + mmap(map_values.begin(), map_values.end()), + mmap(map_values.rbegin(), map_values.rend()), + })); +} + +TEST(Btree, ComparableSet) { + absl::btree_set s1 = {1, 2}; + absl::btree_set s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableSetsDifferentLength) { + absl::btree_set s1 
= {1, 2}; + absl::btree_set s2 = {1, 2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); +} + +TEST(Btree, ComparableMultiset) { + absl::btree_multiset s1 = {1, 2}; + absl::btree_multiset s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableMap) { + absl::btree_map s1 = {{1, 2}}; + absl::btree_map s2 = {{2, 3}}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableMultimap) { + absl::btree_multimap s1 = {{1, 2}}; + absl::btree_multimap s2 = {{2, 3}}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableSetWithCustomComparator) { + // As specified by + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3337.pdf section + // [container.requirements.general].12, ordering associative containers always + // uses default '<' operator + // - even if otherwise the container uses custom functor. + absl::btree_set> s1 = {1, 2}; + absl::btree_set> s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, EraseReturnsIterator) { + absl::btree_set set = {1, 2, 3, 4, 5}; + auto result_it = set.erase(set.begin(), set.find(3)); + EXPECT_EQ(result_it, set.find(3)); + result_it = set.erase(set.find(5)); + EXPECT_EQ(result_it, set.end()); +} + +TEST(Btree, ExtractAndInsertNodeHandleSet) { + absl::btree_set src1 = {1, 2, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 4, 5)); + absl::btree_set other; + absl::btree_set::insert_return_type res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_set src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.value(), 3); +} + +template +void TestExtractWithTrackingForSet() { + InstanceTracker tracker; + { + Set s; + // Add enough elements to make sure we test internal nodes too. + const size_t kSize = 1000; + while (s.size() < kSize) { + s.insert(MovableOnlyInstance(s.size())); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = s.extract(MovableOnlyInstance(i)); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node + s.insert(std::move(nh)); + EXPECT_EQ(s.size(), kSize); + + // Extract with iterator + auto it = s.find(MovableOnlyInstance(i)); + nh = s.extract(it); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node and hint + s.insert(s.begin(), std::move(nh)); + EXPECT_EQ(s.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +template +void TestExtractWithTrackingForMap() { + InstanceTracker tracker; + { + Map m; + // Add enough elements to make sure we test internal nodes too. 
+ const size_t kSize = 1000; + while (m.size() < kSize) { + m.insert( + {CopyableMovableInstance(m.size()), MovableOnlyInstance(m.size())}); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = m.extract(CopyableMovableInstance(i)); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node + m.insert(std::move(nh)); + EXPECT_EQ(m.size(), kSize); + + // Extract with iterator + auto it = m.find(CopyableMovableInstance(i)); + nh = m.extract(it); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node and hint + m.insert(m.begin(), std::move(nh)); + EXPECT_EQ(m.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +TEST(Btree, ExtractTracking) { + TestExtractWithTrackingForSet>(); + TestExtractWithTrackingForSet>(); + TestExtractWithTrackingForMap< + absl::btree_map>(); + TestExtractWithTrackingForMap< + absl::btree_multimap>(); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiSet) { + absl::btree_multiset src1 = {1, 2, 3, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 3, 4, 5)); + absl::btree_multiset other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multiset src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3, 3)); + EXPECT_EQ(res, ++other.find(3)); +} + +TEST(Btree, ExtractAndInsertNodeHandleMap) { + absl::btree_map src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_map other; + absl::btree_map::insert_return_type res = + other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_map src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.key(), 3); + EXPECT_EQ(res.node.mapped(), 6); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiMap) { + absl::btree_multimap src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_multimap other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multimap src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4), Pair(3, 6))); + EXPECT_EQ(res, ++other.begin()); +} + +TEST(Btree, ExtractMultiMapEquivalentKeys) { + // Note: using string keys means a three-way comparator. 
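+  // (With the string-adapted comparator, extract() goes through the
+  // three-way-comparison code path, unlike the int-keyed tests above.)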
+  absl::btree_multimap<std::string, int> map;
+  for (int i = 0; i < 100; ++i) {
+    for (int j = 0; j < 100; ++j) {
+      map.insert({absl::StrCat(i), j});
+    }
+  }
+
+  for (int i = 0; i < 100; ++i) {
+    const std::string key = absl::StrCat(i);
+    auto node_handle = map.extract(key);
+    EXPECT_EQ(node_handle.key(), key);
+    EXPECT_EQ(node_handle.mapped(), 0) << i;
+  }
+
+  for (int i = 0; i < 100; ++i) {
+    const std::string key = absl::StrCat(i);
+    auto node_handle = map.extract(key);
+    EXPECT_EQ(node_handle.key(), key);
+    EXPECT_EQ(node_handle.mapped(), 1) << i;
+  }
+}
+
+TEST(Btree, ExtractAndGetNextSet) {
+  absl::btree_set<int> src = {1, 2, 3, 4, 5};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
+  EXPECT_EQ(extracted_and_next.node.value(), 3);
+  EXPECT_EQ(*extracted_and_next.next, 4);
+}
+
+TEST(Btree, ExtractAndGetNextMultiSet) {
+  absl::btree_multiset<int> src = {1, 2, 3, 4, 5};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
+  EXPECT_EQ(extracted_and_next.node.value(), 3);
+  EXPECT_EQ(*extracted_and_next.next, 4);
+}
+
+TEST(Btree, ExtractAndGetNextMap) {
+  absl::btree_map<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
+  EXPECT_EQ(extracted_and_next.node.key(), 3);
+  EXPECT_EQ(extracted_and_next.node.mapped(), 4);
+  EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
+}
+
+TEST(Btree, ExtractAndGetNextMultiMap) {
+  absl::btree_multimap<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
+  auto it = src.find(3);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
+  EXPECT_EQ(extracted_and_next.node.key(), 3);
+  EXPECT_EQ(extracted_and_next.node.mapped(), 4);
+  EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
+}
+
+TEST(Btree, ExtractAndGetNextEndIter) {
+  absl::btree_set<int> src = {1, 2, 3, 4, 5};
+  auto it = src.find(5);
+  auto extracted_and_next = src.extract_and_get_next(it);
+  EXPECT_THAT(src, ElementsAre(1, 2, 3, 4));
+  EXPECT_EQ(extracted_and_next.node.value(), 5);
+  EXPECT_EQ(extracted_and_next.next, src.end());
+}
+
+TEST(Btree, ExtractDoesntCauseExtraMoves) {
+#ifdef _MSC_VER
+  GTEST_SKIP() << "This test fails on MSVC.";
+#endif
+
+  using Set = absl::btree_set<MovableOnlyInstance>;
+  std::array<std::function<void(Set &)>, 3> extracters = {
+      [](Set &s) { auto node = s.extract(s.begin()); },
+      [](Set &s) { auto ret = s.extract_and_get_next(s.begin()); },
+      [](Set &s) { auto node = s.extract(MovableOnlyInstance(0)); }};
+
+  InstanceTracker tracker;
+  for (int i = 0; i < 3; ++i) {
+    Set s;
+    s.insert(MovableOnlyInstance(0));
+    tracker.ResetCopiesMovesSwaps();
+
+    extracters[i](s);
+    // We expect to see exactly 1 move: from the original slot into the
+    // extracted node.
+    EXPECT_EQ(tracker.copies(), 0) << i;
+    EXPECT_EQ(tracker.moves(), 1) << i;
+    EXPECT_EQ(tracker.swaps(), 0) << i;
+  }
+}
+
+// For multisets, insert with hint also affects correctness because we need to
+// insert immediately before the hint if possible.
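+// Illustrative sketch of that contract with plain ints (a hypothetical test
+// added in this import, not from upstream absl):
+TEST(Btree, MultiSetInsertHintSketch) {
+  absl::btree_multiset<int> s = {1, 3, 3};
+  // Equivalent key with a hint of end(): the new element must be placed
+  // immediately before the hint, i.e. after the existing 3s.
+  auto it = s.insert(s.end(), 3);
+  EXPECT_EQ(it, --s.end());
+}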
+struct InsertMultiHintData { + int key; + int not_key; + bool operator==(const InsertMultiHintData other) const { + return key == other.key && not_key == other.not_key; + } +}; + +struct InsertMultiHintDataKeyCompare { + using is_transparent = void; + bool operator()(const InsertMultiHintData a, + const InsertMultiHintData b) const { + return a.key < b.key; + } + bool operator()(const int a, const InsertMultiHintData b) const { + return a < b.key; + } + bool operator()(const InsertMultiHintData a, const int b) const { + return a.key < b; + } +}; + +TEST(Btree, InsertHintNodeHandle) { + // For unique sets, insert with hint is just a performance optimization. + // Test that insert works correctly when the hint is right or wrong. + { + absl::btree_set src = {1, 2, 3, 4, 5}; + auto nh = src.extract(src.find(3)); + EXPECT_THAT(src, ElementsAre(1, 2, 4, 5)); + absl::btree_set other = {0, 100}; + // Test a correct hint. + auto it = other.insert(other.lower_bound(3), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 100)); + EXPECT_EQ(it, other.find(3)); + + nh = src.extract(src.find(5)); + // Test an incorrect hint. + it = other.insert(other.end(), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 5, 100)); + EXPECT_EQ(it, other.find(5)); + } + + absl::btree_multiset src = + {{1, 2}, {3, 4}, {3, 5}}; + auto nh = src.extract(src.lower_bound(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 4})); + absl::btree_multiset + other = {{3, 1}, {3, 2}, {3, 3}}; + auto it = other.insert(--other.end(), std::move(nh)); + EXPECT_THAT( + other, ElementsAre(InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2}, + InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3})); + EXPECT_EQ(it, --(--other.end())); + + nh = src.extract(src.find(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 5})); + it = other.insert(other.begin(), std::move(nh)); + EXPECT_THAT(other, + ElementsAre(InsertMultiHintData{3, 5}, InsertMultiHintData{3, 1}, + InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4}, + InsertMultiHintData{3, 3})); + EXPECT_EQ(it, other.begin()); +} + +struct IntCompareToCmp { + absl::weak_ordering operator()(int a, int b) const { + if (a < b) return absl::weak_ordering::less; + if (a > b) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } +}; + +TEST(Btree, MergeIntoUniqueContainers) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_set dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoUniqueContainersWithCompareTo) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_set dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainers) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_multiset dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainersWithCompareTo) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_multiset dst; + + dst.merge(src1); + 
EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) { + absl::btree_map src1 = {{1, 1}, {2, 2}, {3, 3}}; + absl::btree_multimap> src2 = { + {5, 5}, {4, 1}, {4, 4}, {3, 2}}; + absl::btree_multimap dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3))); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(3, 2), + Pair(4, 1), Pair(4, 4), Pair(5, 5))); +} + +TEST(Btree, MergeIntoSetMovableOnly) { + absl::btree_set src; + src.insert(MovableOnlyInstance(1)); + absl::btree_multiset dst1; + dst1.insert(MovableOnlyInstance(2)); + absl::btree_set dst2; + + // Test merge into multiset. + dst1.merge(src); + + EXPECT_TRUE(src.empty()); + // ElementsAre/ElementsAreArray don't work with move-only types. + ASSERT_THAT(dst1, SizeIs(2)); + EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1)); + EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2)); + + // Test merge into set. + dst2.merge(dst1); + + EXPECT_TRUE(dst1.empty()); + ASSERT_THAT(dst2, SizeIs(2)); + EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1)); + EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2)); +} + +struct KeyCompareToWeakOrdering { + template + absl::weak_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::weak_ordering::less + : a == b ? absl::weak_ordering::equivalent + : absl::weak_ordering::greater; + } +}; + +struct KeyCompareToStrongOrdering { + template + absl::strong_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::strong_ordering::less + : a == b ? absl::strong_ordering::equal + : absl::strong_ordering::greater; + } +}; + +TEST(Btree, UserProvidedKeyCompareToComparators) { + absl::btree_set weak_set = {1, 2, 3}; + EXPECT_TRUE(weak_set.contains(2)); + EXPECT_FALSE(weak_set.contains(4)); + + absl::btree_set strong_set = {1, 2, 3}; + EXPECT_TRUE(strong_set.contains(2)); + EXPECT_FALSE(strong_set.contains(4)); +} + +TEST(Btree, TryEmplaceBasicTest) { + absl::btree_map m; + + // Should construct a string from the literal. + m.try_emplace(1, "one"); + EXPECT_EQ(1, m.size()); + + // Try other string constructors and const lvalue key. + const int key(42); + m.try_emplace(key, 3, 'a'); + m.try_emplace(2, std::string("two")); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + EXPECT_THAT(m, ElementsAreArray(std::vector>{ + {1, "one"}, {2, "two"}, {42, "aaa"}})); +} + +TEST(Btree, TryEmplaceWithHintWorks) { + // Use a counting comparator here to verify that hint is used. + int calls = 0; + auto cmp = [&calls](int x, int y) { + ++calls; + return x < y; + }; + using Cmp = decltype(cmp); + + // Use a map that is opted out of key_compare being adapted so we can expect + // strict comparison call limits. 
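+  // (The bounds below encode the costs for a ~128-element tree: a hintless
+  // emplace pays at least 4 comparisons for the descent, while a correct hint
+  // needs at most 2: one against the hint and one against its neighbor.)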
+ absl::btree_map> m(cmp); + for (int i = 0; i < 128; ++i) { + m.emplace(i, i); + } + + // Sanity check for the comparator + calls = 0; + m.emplace(127, 127); + EXPECT_GE(calls, 4); + + // Try with begin hint: + calls = 0; + auto it = m.try_emplace(m.begin(), -1, -1); + EXPECT_EQ(129, m.size()); + EXPECT_EQ(it, m.begin()); + EXPECT_LE(calls, 2); + + // Try with end hint: + calls = 0; + std::pair pair1024 = {1024, 1024}; + it = m.try_emplace(m.end(), pair1024.first, pair1024.second); + EXPECT_EQ(130, m.size()); + EXPECT_EQ(it, --m.end()); + EXPECT_LE(calls, 2); + + // Try value already present, bad hint; ensure no duplicate added: + calls = 0; + it = m.try_emplace(m.end(), 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_GE(calls, 4); + EXPECT_EQ(it, m.find(16)); + + // Try value already present, hint points directly to it: + calls = 0; + it = m.try_emplace(it, 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + EXPECT_EQ(it, m.find(16)); + + m.erase(2); + EXPECT_EQ(129, m.size()); + auto hint = m.find(3); + // Try emplace in the middle of two other elements. + calls = 0; + m.try_emplace(hint, 2, 2); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithBadHint) { + absl::btree_map m = {{1, 1}, {9, 9}}; + + // Bad hint (too small), should still emplace: + auto it = m.try_emplace(m.begin(), 2, 2); + EXPECT_EQ(it, ++m.begin()); + EXPECT_THAT(m, ElementsAreArray( + std::vector>{{1, 1}, {2, 2}, {9, 9}})); + + // Bad hint, too large this time: + it = m.try_emplace(++(++m.begin()), 0, 0); + EXPECT_EQ(it, m.begin()); + EXPECT_THAT(m, ElementsAreArray(std::vector>{ + {0, 0}, {1, 1}, {2, 2}, {9, 9}})); +} + +TEST(Btree, TryEmplaceMaintainsSortedOrder) { + absl::btree_map m; + std::pair pair5 = {5, "five"}; + + // Test both lvalue & rvalue emplace. + m.try_emplace(10, "ten"); + m.try_emplace(pair5.first, pair5.second); + EXPECT_EQ(2, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + + int int100{100}; + m.try_emplace(int100, "hundred"); + m.try_emplace(1, "one"); + EXPECT_EQ(4, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithHintAndNoValueArgsWorks) { + absl::btree_map m; + m.try_emplace(m.end(), 1); + EXPECT_EQ(0, m[1]); +} + +TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) { + absl::btree_map m; + m.try_emplace(m.end(), 1, 10, 'a'); + EXPECT_EQ(std::string(10, 'a'), m[1]); +} + +template +using BtreeSetAlloc = absl::btree_set, Alloc>; + +TEST(Btree, AllocatorPropagation) { + TestAllocPropagation(); +} + +TEST(Btree, MinimumAlignmentAllocator) { + absl::btree_set, MinimumAlignmentAlloc> set; + + // Do some basic operations. Test that everything is fine when allocator uses + // minimal alignment. + for (int8_t i = 0; i < 100; ++i) set.insert(i); + set.erase(set.find(50), set.end()); + for (int8_t i = 51; i < 101; ++i) set.insert(i); + + EXPECT_EQ(set.size(), 100); +} + +TEST(Btree, EmptyTree) { + absl::btree_set s; + EXPECT_TRUE(s.empty()); + EXPECT_EQ(s.size(), 0); + EXPECT_GT(s.max_size(), 0); +} + +bool IsEven(int k) { return k % 2 == 0; } + +TEST(Btree, EraseIf) { + // Test that erase_if works with all the container types and supports lambdas. 
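+  // A sketch of the contract exercised below (not the actual implementation):
+  // erase_if(c, pred) removes every element for which pred returns true and
+  // returns the number removed, as if by
+  //   for (auto it = c.begin(); it != c.end();)
+  //     it = pred(*it) ? c.erase(it) : std::next(it);
+  // The real implementation may batch erasures, but the observable result and
+  // the one-call-per-element behavior are what these cases pin down.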
+  {
+    absl::btree_set<int> s = {1, 3, 5, 6, 100};
+    EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3);
+    EXPECT_THAT(s, ElementsAre(1, 3));
+  }
+  {
+    absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
+    EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3);
+    EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
+  }
+  {
+    absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
+    EXPECT_EQ(
+        erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; }),
+        2);
+    EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
+  }
+  {
+    absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
+                                        {6, 6}, {6, 7}, {100, 6}};
+    EXPECT_EQ(
+        erase_if(m,
+                 [](std::pair<const int, int> kv) { return kv.second == 6; }),
+        3);
+    EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
+  }
+  // Test that erasing all elements from a large set works and test support for
+  // function pointers.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(2 * i);
+    EXPECT_EQ(erase_if(s, IsEven), 1000);
+    EXPECT_THAT(s, IsEmpty());
+  }
+  // Test that erase_if supports other forms of function pointers.
+  {
+    absl::btree_set<int> s = {1, 3, 5, 6, 100};
+    EXPECT_EQ(erase_if(s, &IsEven), 2);
+    EXPECT_THAT(s, ElementsAre(1, 3, 5));
+  }
+  // Test that erase_if invokes the predicate once per element.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(i);
+    int pred_calls = 0;
+    EXPECT_EQ(erase_if(s,
+                       [&pred_calls](int k) {
+                         ++pred_calls;
+                         return k % 2;
+                       }),
+              500);
+    EXPECT_THAT(s, SizeIs(500));
+    EXPECT_EQ(pred_calls, 1000);
+  }
+}
+
+TEST(Btree, InsertOrAssign) {
+  absl::btree_map<int, int> m = {{1, 1}, {3, 3}};
+  using value_type = typename decltype(m)::value_type;
+
+  auto ret = m.insert_or_assign(4, 4);
+  EXPECT_EQ(*ret.first, value_type(4, 4));
+  EXPECT_TRUE(ret.second);
+  ret = m.insert_or_assign(3, 100);
+  EXPECT_EQ(*ret.first, value_type(3, 100));
+  EXPECT_FALSE(ret.second);
+
+  auto hint_ret = m.insert_or_assign(ret.first, 3, 200);
+  EXPECT_EQ(*hint_ret, value_type(3, 200));
+  hint_ret = m.insert_or_assign(m.find(1), 0, 1);
+  EXPECT_EQ(*hint_ret, value_type(0, 1));
+  // Test with bad hint.
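+  // (end() is a maximally wrong hint for key -1, which belongs at begin();
+  // hints only affect performance, so the result must be identical.)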
+ hint_ret = m.insert_or_assign(m.end(), -1, 1); + EXPECT_EQ(*hint_ret, value_type(-1, 1)); + + EXPECT_THAT(m, ElementsAre(Pair(-1, 1), Pair(0, 1), Pair(1, 1), Pair(3, 200), + Pair(4, 4))); +} + +TEST(Btree, InsertOrAssignMovableOnly) { + absl::btree_map m; + using value_type = typename decltype(m)::value_type; + + auto ret = m.insert_or_assign(4, MovableOnlyInstance(4)); + EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(4))); + EXPECT_TRUE(ret.second); + ret = m.insert_or_assign(4, MovableOnlyInstance(100)); + EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(100))); + EXPECT_FALSE(ret.second); + + auto hint_ret = m.insert_or_assign(ret.first, 3, MovableOnlyInstance(200)); + EXPECT_EQ(*hint_ret, value_type(3, MovableOnlyInstance(200))); + + EXPECT_EQ(m.size(), 2); +} + +TEST(Btree, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + absl::btree_map m; + m.erase(n); + m.count(n); + m.find(n); + m.contains(n); + m.equal_range(n); + m.insert_or_assign(n, n); + m.insert_or_assign(m.end(), n, n); + m.try_emplace(n); + m.try_emplace(m.end(), n); + m.at(n); + m[n]; +} + +TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) { + const absl::string_view names[] = {"n1", "n2"}; + + absl::btree_set name_set1{std::begin(names), std::end(names)}; + EXPECT_THAT(name_set1, ElementsAreArray(names)); + + absl::btree_set name_set2; + name_set2.insert(std::begin(names), std::end(names)); + EXPECT_THAT(name_set2, ElementsAreArray(names)); +} + +// A type that is explicitly convertible from int and counts constructor calls. +struct ConstructorCounted { + explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; } + bool operator==(int other) const { return i == other; } + + int i; + static int constructor_calls; +}; +int ConstructorCounted::constructor_calls = 0; + +struct ConstructorCountedCompare { + bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; } + bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; } + bool operator()(const ConstructorCounted &a, + const ConstructorCounted &b) const { + return a.i < b.i; + } + using is_transparent = void; +}; + +TEST(Btree, + SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) { + const int i[] = {0, 1, 1}; + ConstructorCounted::constructor_calls = 0; + + absl::btree_set set{ + std::begin(i), std::end(i)}; + EXPECT_THAT(set, ElementsAre(0, 1)); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); + + set.insert(std::begin(i), std::end(i)); + EXPECT_THAT(set, ElementsAre(0, 1)); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); +} + +TEST(Btree, + SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) { + const int i[] = {0, 1}; + + absl::btree_set> s1{std::begin(i), std::end(i)}; + EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull()))); + + absl::btree_set> s2; + s2.insert(std::begin(i), std::end(i)); + EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull()))); +} + +// libstdc++ included with GCC 4.9 has a bug in the std::pair constructors that +// prevents explicit conversions between pair types. +// We only run this test for the libstdc++ from GCC 7 or newer because we can't +// reliably check the libstdc++ version prior to that release. 
+#if !defined(__GLIBCXX__) || \ + (defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7) +TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) { + const std::pair names[] = {{"n1", 1}, {"n2", 2}}; + + absl::btree_map name_map1{std::begin(names), + std::end(names)}; + EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2))); + + absl::btree_map name_map2; + name_map2.insert(std::begin(names), std::end(names)); + EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2))); +} + +TEST(Btree, + MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) { + const std::pair i[] = {{0, 1}, {1, 2}, {1, 3}}; + ConstructorCounted::constructor_calls = 0; + + absl::btree_map map{ + std::begin(i), std::end(i)}; + EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2))); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); + + map.insert(std::begin(i), std::end(i)); + EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2))); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); +} + +TEST(Btree, + MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) { + const std::pair i[] = {{0, 1}, {1, 2}}; + + absl::btree_map, int> m1{std::begin(i), std::end(i)}; + EXPECT_THAT(m1, + ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2))); + + absl::btree_map, int> m2; + m2.insert(std::begin(i), std::end(i)); + EXPECT_THAT(m2, + ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2))); +} + +TEST(Btree, HeterogeneousTryEmplace) { + absl::btree_map m; + std::string s = "key"; + absl::string_view sv = s; + m.try_emplace(sv, 1); + EXPECT_EQ(m[s], 1); + + m.try_emplace(m.end(), sv, 2); + EXPECT_EQ(m[s], 1); +} + +TEST(Btree, HeterogeneousOperatorMapped) { + absl::btree_map m; + std::string s = "key"; + absl::string_view sv = s; + m[sv] = 1; + EXPECT_EQ(m[s], 1); + + m[sv] = 2; + EXPECT_EQ(m[s], 2); +} + +TEST(Btree, HeterogeneousInsertOrAssign) { + absl::btree_map m; + std::string s = "key"; + absl::string_view sv = s; + m.insert_or_assign(sv, 1); + EXPECT_EQ(m[s], 1); + + m.insert_or_assign(m.end(), sv, 2); + EXPECT_EQ(m[s], 2); +} +#endif + +// This test requires std::launder for mutable key access in node handles. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 +TEST(Btree, NodeHandleMutableKeyAccess) { + { + absl::btree_map map; + + map["key1"] = "mapped"; + + auto nh = map.extract(map.begin()); + nh.key().resize(3); + map.insert(std::move(nh)); + + EXPECT_THAT(map, ElementsAre(Pair("key", "mapped"))); + } + // Also for multimap. + { + absl::btree_multimap map; + + map.emplace("key1", "mapped"); + + auto nh = map.extract(map.begin()); + nh.key().resize(3); + map.insert(std::move(nh)); + + EXPECT_THAT(map, ElementsAre(Pair("key", "mapped"))); + } +} +#endif + +struct MultiKey { + int i1; + int i2; +}; + +bool operator==(const MultiKey a, const MultiKey b) { + return a.i1 == b.i1 && a.i2 == b.i2; +} + +// A heterogeneous comparator that has different equivalence classes for +// different lookup types. +struct MultiKeyComp { + using is_transparent = void; + bool operator()(const MultiKey a, const MultiKey b) const { + if (a.i1 != b.i1) return a.i1 < b.i1; + return a.i2 < b.i2; + } + bool operator()(const int a, const MultiKey b) const { return a < b.i1; } + bool operator()(const MultiKey a, const int b) const { return a.i1 < b; } +}; + +// A heterogeneous, three-way comparator that has different equivalence classes +// for different lookup types. 
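+// For example, an int query is ordered only against i1, so a lookup such as
+// set.equal_range(2) treats every {2, *} element as equivalent, even though
+// full-key comparison also orders by i2. The typed tests below depend on
+// exactly this asymmetry.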
+struct MultiKeyThreeWayComp { + using is_transparent = void; + absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const { + if (a.i1 < b.i1) return absl::weak_ordering::less; + if (a.i1 > b.i1) return absl::weak_ordering::greater; + if (a.i2 < b.i2) return absl::weak_ordering::less; + if (a.i2 > b.i2) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } + absl::weak_ordering operator()(const int a, const MultiKey b) const { + if (a < b.i1) return absl::weak_ordering::less; + if (a > b.i1) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } + absl::weak_ordering operator()(const MultiKey a, const int b) const { + if (a.i1 < b) return absl::weak_ordering::less; + if (a.i1 > b) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } +}; + +template +class BtreeMultiKeyTest : public ::testing::Test {}; +using MultiKeyComps = ::testing::Types; +TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps); + +TYPED_TEST(BtreeMultiKeyTest, EqualRange) { + absl::btree_set set; + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 100; ++j) { + set.insert({i, j}); + } + } + + for (int i = 0; i < 100; ++i) { + auto equal_range = set.equal_range(i); + EXPECT_EQ(equal_range.first->i1, i); + EXPECT_EQ(equal_range.first->i2, 0) << i; + EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i; + } +} + +TYPED_TEST(BtreeMultiKeyTest, Extract) { + absl::btree_set set; + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 100; ++j) { + set.insert({i, j}); + } + } + + for (int i = 0; i < 100; ++i) { + auto node_handle = set.extract(i); + EXPECT_EQ(node_handle.value().i1, i); + EXPECT_EQ(node_handle.value().i2, 0) << i; + } + + for (int i = 0; i < 100; ++i) { + auto node_handle = set.extract(i); + EXPECT_EQ(node_handle.value().i1, i); + EXPECT_EQ(node_handle.value().i2, 1) << i; + } +} + +TYPED_TEST(BtreeMultiKeyTest, Erase) { + absl::btree_set set = { + {1, 1}, {2, 1}, {2, 2}, {3, 1}}; + EXPECT_EQ(set.erase(2), 2); + EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1})); +} + +TYPED_TEST(BtreeMultiKeyTest, Count) { + const absl::btree_set set = { + {1, 1}, {2, 1}, {2, 2}, {3, 1}}; + EXPECT_EQ(set.count(2), 2); +} + +TEST(Btree, SetIteratorsAreConst) { + using Set = absl::btree_set; + EXPECT_TRUE( + (std::is_same::value)); + EXPECT_TRUE( + (std::is_same::value)); + + using MSet = absl::btree_multiset; + EXPECT_TRUE( + (std::is_same::value)); + EXPECT_TRUE( + (std::is_same::value)); +} + +TEST(Btree, AllocConstructor) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set(alloc); + + set.insert({1, 2, 3}); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocInitializerListConstructor) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set({1, 2, 3}, alloc); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocRangeConstructor) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + std::vector v = {1, 2, 3}; + Set set(v.begin(), v.end(), alloc); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocCopyConstructor) { + using Alloc = CountingAllocator; + 
using Set = absl::btree_set, Alloc>; + int64_t bytes_used1 = 0; + Alloc alloc1(&bytes_used1); + Set set1(alloc1); + + set1.insert({1, 2, 3}); + + int64_t bytes_used2 = 0; + Alloc alloc2(&bytes_used2); + Set set2(set1, alloc2); + + EXPECT_THAT(set1, ElementsAre(1, 2, 3)); + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used1, set1.size() * sizeof(int)); + EXPECT_EQ(bytes_used1, bytes_used2); +} + +TEST(Btree, AllocMoveConstructor_SameAlloc) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set1(alloc); + + set1.insert({1, 2, 3}); + + const int64_t original_bytes_used = bytes_used; + EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); + + Set set2(std::move(set1), alloc); + + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + EXPECT_EQ(bytes_used, original_bytes_used); +} + +TEST(Btree, AllocMoveConstructor_DifferentAlloc) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used1 = 0; + Alloc alloc1(&bytes_used1); + Set set1(alloc1); + + set1.insert({1, 2, 3}); + + const int64_t original_bytes_used = bytes_used1; + EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); + + int64_t bytes_used2 = 0; + Alloc alloc2(&bytes_used2); + Set set2(std::move(set1), alloc2); + + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + // We didn't free these bytes allocated by `set1` yet. + EXPECT_EQ(bytes_used1, original_bytes_used); + EXPECT_EQ(bytes_used2, original_bytes_used); +} + +bool IntCmp(const int a, const int b) { return a < b; } + +TEST(Btree, SupportsFunctionPtrComparator) { + absl::btree_set set(IntCmp); + set.insert({1, 2, 3}); + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_TRUE(set.key_comp()(1, 2)); + EXPECT_TRUE(set.value_comp()(1, 2)); + + absl::btree_map map(&IntCmp); + map[1] = 1; + EXPECT_THAT(map, ElementsAre(Pair(1, 1))); + EXPECT_TRUE(map.key_comp()(1, 2)); + EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2))); +} + +template +struct TransparentPassThroughComp { + using is_transparent = void; + + // This will fail compilation if we attempt a comparison that Compare does not + // support, and the failure will happen inside the function implementation so + // it can't be avoided by using SFINAE on this comparator. + template + bool operator()(const T &lhs, const U &rhs) const { + return Compare()(lhs, rhs); + } +}; + +TEST(Btree, + SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) { + absl::btree_set> set; + set.insert(MultiKey{1, 2}); + EXPECT_TRUE(set.contains(1)); +} + +TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) { + absl::btree_set set = {{}, MultiKeyComp{}}; +} + +TEST(Btree, InvalidComparatorsCaught) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + { + struct ZeroAlwaysLessCmp { + bool operator()(int lhs, int rhs) const { + if (lhs == 0) return true; + return lhs < rhs; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); + } + { + struct ThreeWayAlwaysLessCmp { + absl::weak_ordering operator()(int, int) const { + return absl::weak_ordering::less; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); + } + { + struct SumGreaterZeroCmp { + bool operator()(int lhs, int rhs) const { + // First, do equivalence correctly - so we can test later condition. + if (lhs == rhs) return false; + return lhs + rhs > 0; + } + }; + absl::btree_set set; + // Note: '!' 
only needs to be escaped when it's the first character. + EXPECT_DEATH(set.insert({0, 1, 2}), + R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex"); + } + { + struct ThreeWaySumGreaterZeroCmp { + absl::weak_ordering operator()(int lhs, int rhs) const { + // First, do equivalence correctly - so we can test later condition. + if (lhs == rhs) return absl::weak_ordering::equivalent; + + if (lhs + rhs > 0) return absl::weak_ordering::less; + if (lhs + rhs == 0) return absl::weak_ordering::equivalent; + return absl::weak_ordering::greater; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); + } + // Verify that we detect cases of comparators that violate transitivity. + // When the comparators below check for the presence of an optional field, + // they violate transitivity because instances that have the optional field + // compare differently with each other from how they compare with instances + // that don't have the optional field. + struct ClockTime { + absl::optional hour; + int minute; + }; + // `comp(a,b) && comp(b,c) && !comp(a,c)` violates transitivity. + ClockTime a = {absl::nullopt, 1}; + ClockTime b = {2, 5}; + ClockTime c = {6, 0}; + { + struct NonTransitiveTimeCmp { + bool operator()(ClockTime lhs, ClockTime rhs) const { + if (lhs.hour.has_value() && rhs.hour.has_value() && + *lhs.hour != *rhs.hour) { + return *lhs.hour < *rhs.hour; + } + return lhs.minute < rhs.minute; + } + }; + NonTransitiveTimeCmp cmp; + ASSERT_TRUE(cmp(a, b) && cmp(b, c) && !cmp(a, c)); + absl::btree_set set; + EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly"); + absl::btree_multiset mset; + EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly"); + } + { + struct ThreeWayNonTransitiveTimeCmp { + absl::weak_ordering operator()(ClockTime lhs, ClockTime rhs) const { + if (lhs.hour.has_value() && rhs.hour.has_value() && + *lhs.hour != *rhs.hour) { + return *lhs.hour < *rhs.hour ? absl::weak_ordering::less + : absl::weak_ordering::greater; + } + return lhs.minute < rhs.minute ? absl::weak_ordering::less + : lhs.minute == rhs.minute ? absl::weak_ordering::equivalent + : absl::weak_ordering::greater; + } + }; + ThreeWayNonTransitiveTimeCmp cmp; + ASSERT_TRUE(cmp(a, b) < 0 && cmp(b, c) < 0 && cmp(a, c) > 0); + absl::btree_set set; + EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly"); + absl::btree_multiset mset; + EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly"); + } +} + +TEST(Btree, MutatedKeysCaught) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + struct IntPtrCmp { + bool operator()(int *lhs, int *rhs) const { return *lhs < *rhs; } + }; + { + absl::btree_set set; + int arr[] = {0, 1, 2}; + set.insert({&arr[0], &arr[1], &arr[2]}); + arr[0] = 100; + EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly"); + } + { + absl::btree_multiset set; + int arr[] = {0, 1, 2}; + set.insert({&arr[0], &arr[0], &arr[1], &arr[1], &arr[2], &arr[2]}); + arr[0] = 100; + EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly"); + } +} + +#ifndef _MSC_VER +// This test crashes on MSVC. +TEST(Btree, InvalidIteratorUse) { + if (!BtreeGenerationsEnabled()) + GTEST_SKIP() << "Generation validation for iterators is disabled."; + + // Invalid memory use can trigger use-after-free in ASan, HWASAN or + // invalidated iterator assertions. 
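+  // (Roughly how generation validation works: the tree keeps a generation
+  // counter that mutating operations bump, and each iterator records the
+  // generation it was created under, so dereferencing or comparing a stale
+  // iterator fails an assertion.)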
+ constexpr const char *kInvalidMemoryDeathMessage = + "use-after-free|invalidated iterator"; + + { + absl::btree_set set; + for (int i = 0; i < 10; ++i) set.insert(i); + auto it = set.begin(); + set.erase(it++); + EXPECT_DEATH(set.erase(it++), kInvalidMemoryDeathMessage); + } + { + absl::btree_set set; + for (int i = 0; i < 10; ++i) set.insert(i); + auto it = set.insert(20).first; + set.insert(30); + EXPECT_DEATH(*it, kInvalidMemoryDeathMessage); + } + { + absl::btree_set set; + for (int i = 0; i < 10000; ++i) set.insert(i); + auto it = set.find(5000); + ASSERT_NE(it, set.end()); + set.erase(1); + EXPECT_DEATH(*it, kInvalidMemoryDeathMessage); + } + { + absl::btree_set set; + for (int i = 0; i < 10; ++i) set.insert(i); + auto it = set.insert(20).first; + set.insert(30); + EXPECT_DEATH(void(it == set.begin()), kInvalidMemoryDeathMessage); + EXPECT_DEATH(void(set.begin() == it), kInvalidMemoryDeathMessage); + } +} +#endif + +class OnlyConstructibleByAllocator { + explicit OnlyConstructibleByAllocator(int i) : i_(i) {} + + public: + OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other) + : i_(other.i_) {} + OnlyConstructibleByAllocator &operator=( + const OnlyConstructibleByAllocator &other) { + i_ = other.i_; + return *this; + } + int Get() const { return i_; } + bool operator==(int i) const { return i_ == i; } + + private: + template + friend class OnlyConstructibleAllocator; + + int i_; +}; + +template +class OnlyConstructibleAllocator : public std::allocator { + public: + OnlyConstructibleAllocator() = default; + template + explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator &) {} + + void construct(OnlyConstructibleByAllocator *p, int i) { + new (p) OnlyConstructibleByAllocator(i); + } + template + void construct(Pair *p, const int i) { + OnlyConstructibleByAllocator only(i); + new (p) Pair(std::move(only), i); + } + + template + struct rebind { + using other = OnlyConstructibleAllocator; + }; +}; + +struct OnlyConstructibleByAllocatorComp { + using is_transparent = void; + bool operator()(OnlyConstructibleByAllocator a, + OnlyConstructibleByAllocator b) const { + return a.Get() < b.Get(); + } + bool operator()(int a, OnlyConstructibleByAllocator b) const { + return a < b.Get(); + } + bool operator()(OnlyConstructibleByAllocator a, int b) const { + return a.Get() < b; + } +}; + +TEST(Btree, OnlyConstructibleByAllocatorType) { + const std::array arr = {3, 4}; + { + absl::btree_set> + set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + set.insert(arr.begin(), arr.end()); + EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); + } + { + absl::btree_multiset> + set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + // TODO(ezb): fix insert_multi to allow this to compile. + // set.insert(arr.begin(), arr.end()); + EXPECT_THAT(set, ElementsAre(1, 2)); + } + { + absl::btree_map> + map; + map.emplace(1); + map.emplace_hint(map.end(), 2); + map.insert(arr.begin(), arr.end()); + EXPECT_THAT(map, + ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4))); + } + { + absl::btree_multimap> + map; + map.emplace(1); + map.emplace_hint(map.end(), 2); + // TODO(ezb): fix insert_multi to allow this to compile. 
+ // map.insert(arr.begin(), arr.end()); + EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2))); + } +} + +class NotAssignable { + public: + explicit NotAssignable(int i) : i_(i) {} + NotAssignable(const NotAssignable &other) : i_(other.i_) {} + NotAssignable &operator=(NotAssignable &&other) = delete; + int Get() const { return i_; } + bool operator==(int i) const { return i_ == i; } + friend bool operator<(NotAssignable a, NotAssignable b) { + return a.i_ < b.i_; + } + + private: + int i_; +}; + +TEST(Btree, NotAssignableType) { + { + absl::btree_set set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + set.insert(NotAssignable(3)); + set.insert(set.end(), NotAssignable(4)); + EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); + set.erase(set.begin()); + EXPECT_THAT(set, ElementsAre(2, 3, 4)); + } + { + absl::btree_multiset set; + set.emplace(1); + set.emplace_hint(set.end(), 2); + set.insert(NotAssignable(2)); + set.insert(set.end(), NotAssignable(3)); + EXPECT_THAT(set, ElementsAre(1, 2, 2, 3)); + set.erase(set.begin()); + EXPECT_THAT(set, ElementsAre(2, 2, 3)); + } + { + absl::btree_map map; + map.emplace(NotAssignable(1), 1); + map.emplace_hint(map.end(), NotAssignable(2), 2); + map.insert({NotAssignable(3), 3}); + map.insert(map.end(), {NotAssignable(4), 4}); + EXPECT_THAT(map, + ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4))); + map.erase(map.begin()); + EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4))); + } + { + absl::btree_multimap map; + map.emplace(NotAssignable(1), 1); + map.emplace_hint(map.end(), NotAssignable(2), 2); + map.insert({NotAssignable(2), 3}); + map.insert(map.end(), {NotAssignable(3), 3}); + EXPECT_THAT(map, + ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3))); + map.erase(map.begin()); + EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3))); + } +} + +struct ArenaLike { + void* recycled = nullptr; + size_t recycled_size = 0; +}; + +// A very simple implementation of arena allocation. +template +class ArenaLikeAllocator : public std::allocator { + public: + // Standard library containers require the ability to allocate objects of + // different types which they can do so via rebind.other. + template + struct rebind { + using other = ArenaLikeAllocator; + }; + + explicit ArenaLikeAllocator(ArenaLike* arena) noexcept : arena_(arena) {} + + ~ArenaLikeAllocator() { + if (arena_->recycled != nullptr) { + delete [] static_cast(arena_->recycled); + arena_->recycled = nullptr; + } + } + + template + explicit ArenaLikeAllocator(const ArenaLikeAllocator& other) noexcept + : arena_(other.arena_) {} + + T* allocate(size_t num_objects, const void* = nullptr) { + size_t size = num_objects * sizeof(T); + if (arena_->recycled != nullptr && arena_->recycled_size == size) { + T* result = static_cast(arena_->recycled); + arena_->recycled = nullptr; + return result; + } + return new T[num_objects]; + } + + void deallocate(T* p, size_t num_objects) { + size_t size = num_objects * sizeof(T); + + // Simulate writing to the freed memory as an actual arena allocator might + // do. This triggers an error report if the memory is poisoned. + memset(p, 0xde, size); + + if (arena_->recycled == nullptr) { + arena_->recycled = p; + arena_->recycled_size = size; + } else { + delete [] p; + } + } + + ArenaLike* arena_; +}; + +// This test verifies that an arena allocator that reuses memory will not be +// asked to free poisoned BTree memory. 
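+// The insert/erase/insert sequence below recycles one node-sized block: erase
+// returns the node to the arena, whose deallocate() immediately scribbles
+// 0xde over it, so the tree must have unpoisoned that memory first or the
+// write itself would trip the sanitizer.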
+TEST(Btree, ReusePoisonMemory) { + using Alloc = ArenaLikeAllocator; + using Set = absl::btree_set, Alloc>; + ArenaLike arena; + Alloc alloc(&arena); + Set set(alloc); + + set.insert(0); + set.erase(0); + set.insert(0); +} + +TEST(Btree, IteratorSubtraction) { + absl::BitGen bitgen; + std::vector vec; + // Randomize the set's insertion order so the nodes aren't all full. + for (int i = 0; i < 1000000; ++i) vec.push_back(i); + absl::c_shuffle(vec, bitgen); + + absl::btree_set set; + for (int i : vec) set.insert(i); + + for (int i = 0; i < 1000; ++i) { + size_t begin = absl::Uniform(bitgen, 0u, set.size()); + size_t end = absl::Uniform(bitgen, begin, set.size()); + ASSERT_EQ(end - begin, set.find(end) - set.find(begin)) + << begin << " " << end; + } +} + +TEST(Btree, DereferencingEndIterator) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + absl::btree_set set; + for (int i = 0; i < 1000; ++i) set.insert(i); + EXPECT_DEATH(*set.end(), R"regex(Dereferencing end\(\) iterator)regex"); +} + +TEST(Btree, InvalidIteratorComparison) { + if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; + + absl::btree_set set1, set2; + for (int i = 0; i < 1000; ++i) { + set1.insert(i); + set2.insert(i); + } + + constexpr const char *kValueInitDeathMessage = + "Comparing default-constructed iterator with .*non-default-constructed " + "iterator"; + typename absl::btree_set::iterator iter1, iter2; + EXPECT_EQ(iter1, iter2); + EXPECT_DEATH(void(set1.begin() == iter1), kValueInitDeathMessage); + EXPECT_DEATH(void(iter1 == set1.begin()), kValueInitDeathMessage); + + constexpr const char *kDifferentContainerDeathMessage = + "Comparing iterators from different containers"; + iter1 = set1.begin(); + iter2 = set2.begin(); + EXPECT_DEATH(void(iter1 == iter2), kDifferentContainerDeathMessage); + EXPECT_DEATH(void(iter2 == iter1), kDifferentContainerDeathMessage); +} + +TEST(Btree, InvalidPointerUse) { + if (!kAsan) + GTEST_SKIP() << "We only detect invalid pointer use in ASan mode."; + + absl::btree_set set; + set.insert(0); + const int *ptr = &*set.begin(); + set.insert(1); + EXPECT_DEATH(std::cout << *ptr, "use-after-free"); + size_t slots_per_node = BtreeNodePeer::GetNumSlotsPerNode(); + for (int i = 2; i < slots_per_node - 1; ++i) set.insert(i); + ptr = &*set.begin(); + set.insert(static_cast(slots_per_node)); + EXPECT_DEATH(std::cout << *ptr, "use-after-free"); +} + +template +void TestBasicFunctionality(Set set) { + using value_type = typename Set::value_type; + for (int i = 0; i < 100; ++i) { set.insert(value_type(i)); } + for (int i = 50; i < 100; ++i) { set.erase(value_type(i)); } + auto it = set.begin(); + for (int i = 0; i < 50; ++i, ++it) { + ASSERT_EQ(set.find(value_type(i)), it) << i; + } +} + +template +struct alignas(align) OveralignedKey { + explicit OveralignedKey(int i) : key(i) {} + bool operator<(const OveralignedKey &other) const { return key < other.key; } + int key = 0; +}; + +TEST(Btree, OveralignedKey) { + // Test basic functionality with both even and odd numbers of slots per node. + // The goal here is to detect cases where alignment may be incorrect. + TestBasicFunctionality( + SizedBtreeSet, /*TargetValuesPerNode=*/8>()); + TestBasicFunctionality( + SizedBtreeSet, /*TargetValuesPerNode=*/9>()); +} + +TEST(Btree, FieldTypeEqualsSlotType) { + // This breaks if we try to do layout_type::Pointer because + // slot_type is the same as field_type. 
+  using set_type = absl::btree_set<uint8_t>;
+  static_assert(BtreeNodePeer::FieldTypeEqualsSlotType<set_type>(), "");
+  TestBasicFunctionality(set_type());
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/container/btree_test.h b/third_party/absl/absl/container/btree_test.h
new file mode 100644
index 000000000..624908072
--- /dev/null
+++ b/third_party/absl/absl/container/btree_test.h
@@ -0,0 +1,166 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_BTREE_TEST_H_
+#define ABSL_CONTAINER_BTREE_TEST_H_
+
+#include <algorithm>
+#include <cassert>
+#include <random>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/strings/cord.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Like remove_const but propagates the removal through std::pair.
+template <typename T>
+struct remove_pair_const {
+  using type = typename std::remove_const<T>::type;
+};
+template <typename T, typename U>
+struct remove_pair_const<std::pair<T, U> > {
+  using type = std::pair<typename remove_pair_const<T>::type,
+                         typename remove_pair_const<U>::type>;
+};
+
+// Utility class to provide an accessor for a key given a value. The default
+// behavior is to treat the value as a pair and return the first element.
+template <typename K, typename V>
+struct KeyOfValue {
+  struct type {
+    const K& operator()(const V& p) const { return p.first; }
+  };
+};
+
+// Partial specialization of KeyOfValue class for when the key and value are
+// the same type such as in set<> and btree_set<>.
+template <typename K>
+struct KeyOfValue<K, K> {
+  struct type {
+    const K& operator()(const K& k) const { return k; }
+  };
+};
+
+inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) {
+  assert(val <= maxval);
+  constexpr unsigned kBase = 64;  // avoid integer division.
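+  // Digits are emitted least-significant first, offset from ' ' so that the
+  // fixed-width result sorts lexicographically in numeric order. For example,
+  // with maxval = 4095 every value gets three base-64 digits:
+  //   GenerateDigits(buf, 1, 4095)  -> "  !"  (' '+0, ' '+0, ' '+1)
+  //   GenerateDigits(buf, 64, 4095) -> " ! "  (' '+0, ' '+1, ' '+0)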
+  unsigned p = 15;
+  buf[p--] = 0;
+  while (maxval > 0) {
+    buf[p--] = ' ' + (val % kBase);
+    val /= kBase;
+    maxval /= kBase;
+  }
+  return buf + p + 1;
+}
+
+template <typename K>
+struct Generator {
+  int maxval;
+  explicit Generator(int m) : maxval(m) {}
+  K operator()(int i) const {
+    assert(i <= maxval);
+    return K(i);
+  }
+};
+
+template <>
+struct Generator<absl::Time> {
+  int maxval;
+  explicit Generator(int m) : maxval(m) {}
+  absl::Time operator()(int i) const { return absl::FromUnixMillis(i); }
+};
+
+template <>
+struct Generator<std::string> {
+  int maxval;
+  explicit Generator(int m) : maxval(m) {}
+  std::string operator()(int i) const {
+    char buf[16];
+    return GenerateDigits(buf, i, maxval);
+  }
+};
+
+template <>
+struct Generator<Cord> {
+  int maxval;
+  explicit Generator(int m) : maxval(m) {}
+  Cord operator()(int i) const {
+    char buf[16];
+    return Cord(GenerateDigits(buf, i, maxval));
+  }
+};
+
+template <typename T, typename U>
+struct Generator<std::pair<T, U> > {
+  Generator<typename remove_pair_const<T>::type> tgen;
+  Generator<typename remove_pair_const<U>::type> ugen;
+
+  explicit Generator(int m) : tgen(m), ugen(m) {}
+  std::pair<T, U> operator()(int i) const {
+    return std::make_pair(tgen(i), ugen(i));
+  }
+};
+
+// Generate n values for our tests and benchmarks. Value range is [0, maxval].
+inline std::vector<int> GenerateNumbersWithSeed(int n, int maxval, int seed) {
+  // NOTE: Some tests rely on generated numbers not changing between test runs.
+  // We use std::minstd_rand0 because it is well-defined, but don't use
+  // std::uniform_int_distribution because platforms use different algorithms.
+  std::minstd_rand0 rng(seed);
+
+  std::vector<int> values;
+  absl::flat_hash_set<int> unique_values;
+  if (values.size() < n) {
+    for (int i = values.size(); i < n; i++) {
+      int value;
+      do {
+        value = static_cast<int>(rng()) % (maxval + 1);
+      } while (!unique_values.insert(value).second);
+
+      values.push_back(value);
+    }
+  }
+  return values;
+}
+
+// Generates n values in the range [0, maxval].
+template <typename V>
+std::vector<V> GenerateValuesWithSeed(int n, int maxval, int seed) {
+  const std::vector<int> nums = GenerateNumbersWithSeed(n, maxval, seed);
+  Generator<V> gen(maxval);
+  std::vector<V> vec;
+
+  vec.reserve(n);
+  for (int i = 0; i < n; i++) {
+    vec.push_back(gen(nums[i]));
+  }
+
+  return vec;
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_BTREE_TEST_H_
diff --git a/third_party/absl/absl/container/fixed_array.h b/third_party/absl/absl/container/fixed_array.h
new file mode 100644
index 000000000..9f1c813dc
--- /dev/null
+++ b/third_party/absl/absl/container/fixed_array.h
@@ -0,0 +1,556 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: fixed_array.h
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray<T>` represents a non-resizable array of `T` where the length
+// of the array can be determined at run-time. It is a good replacement for
+// non-standard and deprecated uses of `alloca()` and variable length arrays
+// within the GCC extension. (See
+// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
+//
+// `FixedArray` allocates small arrays inline, keeping performance fast by
+// avoiding heap operations. It also helps reduce the chances of
+// accidentally overflowing your stack if large input is passed to
+// your function.
+
+#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_
+#define ABSL_CONTAINER_FIXED_ARRAY_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
+
+// -----------------------------------------------------------------------------
+// FixedArray
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray` provides a run-time fixed-size array, allocating a small array
+// inline for efficiency.
+//
+// Most users should not specify the `N` template parameter and should instead
+// let `FixedArray` automatically determine the number of elements to store
+// inline based on `sizeof(T)`. If `N` is specified, the `FixedArray`
+// implementation will use inline storage for arrays with a length <= `N`.
+//
+// Note that a `FixedArray` constructed with a `size_type` argument will
+// default-initialize its values by leaving trivially constructible types
+// uninitialized (e.g. int, int[4], double), and others default-constructed.
+// This matches the behavior of C-style arrays and `std::array`, but not
+// `std::vector`.
+template <typename T, size_t N = kFixedArrayUseDefault,
+          typename A = std::allocator<T>>
+class FixedArray {
+  static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
+                "Arrays with unknown bounds cannot be used with FixedArray.");
+
+  static constexpr size_t kInlineBytesDefault = 256;
+
+  using AllocatorTraits = std::allocator_traits<A>;
+  // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
+  // but this seems to be mostly pedantic.
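+  // The guard below keeps the iterator-pair constructor out of overload
+  // resolution for non-iterator arguments, e.g. FixedArray<int>(5, 10) must
+  // select the (size, value) constructor rather than treating two ints as an
+  // iterator range.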
+  template <typename Iterator>
+  using EnableIfForwardIterator = absl::enable_if_t<std::is_convertible<
+      typename std::iterator_traits<Iterator>::iterator_category,
+      std::forward_iterator_tag>::value>;
+  static constexpr bool NoexceptCopyable() {
+    return std::is_nothrow_copy_constructible<StorageElement>::value &&
+           absl::allocator_is_nothrow<allocator_type>::value;
+  }
+  static constexpr bool NoexceptMovable() {
+    return std::is_nothrow_move_constructible<StorageElement>::value &&
+           absl::allocator_is_nothrow<allocator_type>::value;
+  }
+  static constexpr bool DefaultConstructorIsNonTrivial() {
+    return !absl::is_trivially_default_constructible<StorageElement>::value;
+  }
+
+ public:
+  using allocator_type = typename AllocatorTraits::allocator_type;
+  using value_type = typename AllocatorTraits::value_type;
+  using pointer = typename AllocatorTraits::pointer;
+  using const_pointer = typename AllocatorTraits::const_pointer;
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using size_type = typename AllocatorTraits::size_type;
+  using difference_type = typename AllocatorTraits::difference_type;
+  using iterator = pointer;
+  using const_iterator = const_pointer;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  static constexpr size_type inline_elements =
+      (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
+                                  : static_cast<size_type>(N));
+
+  FixedArray(const FixedArray& other) noexcept(NoexceptCopyable())
+      : FixedArray(other,
+                   AllocatorTraits::select_on_container_copy_construction(
+                       other.storage_.alloc())) {}
+
+  FixedArray(const FixedArray& other,
+             const allocator_type& a) noexcept(NoexceptCopyable())
+      : FixedArray(other.begin(), other.end(), a) {}
+
+  FixedArray(FixedArray&& other) noexcept(NoexceptMovable())
+      : FixedArray(std::move(other), other.storage_.alloc()) {}
+
+  FixedArray(FixedArray&& other,
+             const allocator_type& a) noexcept(NoexceptMovable())
+      : FixedArray(std::make_move_iterator(other.begin()),
+                   std::make_move_iterator(other.end()), a) {}
+
+  // Creates an array object that can store `n` elements.
+  // Note that trivially constructible elements will be uninitialized.
+  explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
+      : storage_(n, a) {
+    if (DefaultConstructorIsNonTrivial()) {
+      memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
+                                      storage_.end());
+    }
+  }
+
+  // Creates an array initialized with `n` copies of `val`.
+  FixedArray(size_type n, const value_type& val,
+             const allocator_type& a = allocator_type())
+      : storage_(n, a) {
+    memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
+                                    storage_.end(), val);
+  }
+
+  // Creates an array initialized with the size and contents of `init_list`.
+  FixedArray(std::initializer_list<value_type> init_list,
+             const allocator_type& a = allocator_type())
+      : FixedArray(init_list.begin(), init_list.end(), a) {}
+
+  // Creates an array initialized with the elements from the input
+  // range. The array's size will always be `std::distance(first, last)`.
+  // REQUIRES: Iterator must be a forward_iterator or better.
+  template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
+  FixedArray(Iterator first, Iterator last,
+             const allocator_type& a = allocator_type())
+      : storage_(std::distance(first, last), a) {
+    memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first,
+                               last);
+  }
+
+  ~FixedArray() noexcept {
+    for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
+      AllocatorTraits::destroy(storage_.alloc(), cur);
+    }
+  }
+
+  // Assignments are deleted because they break the invariant that the size of
+  // a `FixedArray` never changes.
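+  // For instance, assigning a 3-element FixedArray to a 5-element one would
+  // have to change size(); copy and move construction remain available
+  // because they fix the size at creation.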
+  void operator=(FixedArray&&) = delete;
+  void operator=(const FixedArray&) = delete;
+
+  // FixedArray::size()
+  //
+  // Returns the length of the fixed array.
+  size_type size() const { return storage_.size(); }
+
+  // FixedArray::max_size()
+  //
+  // Returns the largest possible value of `std::distance(begin(), end())` for
+  // a `FixedArray`, i.e. the maximum number of addressable bytes divided by
+  // the number of bytes taken by T.
+  constexpr size_type max_size() const {
+    return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+  }
+
+  // FixedArray::empty()
+  //
+  // Returns whether or not the fixed array is empty.
+  bool empty() const { return size() == 0; }
+
+  // FixedArray::memsize()
+  //
+  // Returns the memory size of the fixed array in bytes.
+  size_t memsize() const { return size() * sizeof(value_type); }
+
+  // FixedArray::data()
+  //
+  // Returns a const T* pointer to elements of the `FixedArray`. This pointer
+  // can be used to access (but not modify) the contained elements.
+  const_pointer data() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return AsValueType(storage_.begin());
+  }
+
+  // Overload of FixedArray::data() to return a T* pointer to elements of the
+  // fixed array. This pointer can be used to access and modify the contained
+  // elements.
+  pointer data() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return AsValueType(storage_.begin());
+  }
+
+  // FixedArray::operator[]
+  //
+  // Returns a reference to the ith element of the fixed array.
+  // REQUIRES: 0 <= i < size()
+  reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(i < size());
+    return data()[i];
+  }
+
+  // Overload of FixedArray::operator[] to return a const reference to the
+  // ith element of the fixed array.
+  // REQUIRES: 0 <= i < size()
+  const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(i < size());
+    return data()[i];
+  }
+
+  // FixedArray::at()
+  //
+  // Bounds-checked access. Returns a reference to the ith element of the
+  // fixed array, or throws `std::out_of_range` if `i` is out of bounds.
+  reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
+      base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // Overload of FixedArray::at() to return a const reference to the ith
+  // element of the fixed array.
+  const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    if (ABSL_PREDICT_FALSE(i >= size())) {
+      base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+    }
+    return data()[i];
+  }
+
+  // FixedArray::front()
+  //
+  // Returns a reference to the first element of the fixed array.
+  reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(!empty());
+    return data()[0];
+  }
+
+  // Overload of FixedArray::front() to return a reference to the first element
+  // of a fixed array of const values.
+  const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(!empty());
+    return data()[0];
+  }
+
+  // FixedArray::back()
+  //
+  // Returns a reference to the last element of the fixed array.
+  reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(!empty());
+    return data()[size() - 1];
+  }
+
+  // Overload of FixedArray::back() to return a reference to the last element
+  // of a fixed array of const values.
+ const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // FixedArray::begin() + // + // Returns an iterator to the beginning of the fixed array. + iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); } + + // Overload of FixedArray::begin() to return a const iterator to the + // beginning of the fixed array. + const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); } + + // FixedArray::cbegin() + // + // Returns a const iterator to the beginning of the fixed array. + const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return begin(); + } + + // FixedArray::end() + // + // Returns an iterator to the end of the fixed array. + iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data() + size(); } + + // Overload of FixedArray::end() to return a const iterator to the end of the + // fixed array. + const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return data() + size(); + } + + // FixedArray::cend() + // + // Returns a const iterator to the end of the fixed array. + const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); } + + // FixedArray::rbegin() + // + // Returns a reverse iterator from the end of the fixed array. + reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND { + return reverse_iterator(end()); + } + + // Overload of FixedArray::rbegin() to return a const reverse iterator from + // the end of the fixed array. + const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return const_reverse_iterator(end()); + } + + // FixedArray::crbegin() + // + // Returns a const reverse iterator from the end of the fixed array. + const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return rbegin(); + } + + // FixedArray::rend() + // + // Returns a reverse iterator from the beginning of the fixed array. + reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND { + return reverse_iterator(begin()); + } + + // Overload of FixedArray::rend() for returning a const reverse iterator + // from the beginning of the fixed array. + const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return const_reverse_iterator(begin()); + } + + // FixedArray::crend() + // + // Returns a reverse iterator from the beginning of the fixed array. + const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return rend(); + } + + // FixedArray::fill() + // + // Assigns the given `value` to all elements in the fixed array. + void fill(const value_type& val) { std::fill(begin(), end(), val); } + + // Relational operators. Equality operators are elementwise using + // `operator==`, while order operators order FixedArrays lexicographically. 
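+  // For example, FixedArray<int>{1, 2, 3} compares less than
+  // FixedArray<int>{1, 2, 4}, and arrays of different lengths are simply
+  // unequal: the four-iterator std::equal overload used below returns false
+  // when the ranges' lengths differ.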
+  friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
+    return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+  }
+
+  friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
+    return !(lhs == rhs);
+  }
+
+  friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
+    return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
+                                        rhs.end());
+  }
+
+  friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
+    return rhs < lhs;
+  }
+
+  friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
+    return !(rhs < lhs);
+  }
+
+  friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
+    return !(lhs < rhs);
+  }
+
+  template <typename H>
+  friend H AbslHashValue(H h, const FixedArray& v) {
+    return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
+                      v.size());
+  }
+
+ private:
+  // StorageElement
+  //
+  // For FixedArrays with a C-style-array value_type, StorageElement is a POD
+  // wrapper struct called StorageElementWrapper that holds the value_type
+  // instance inside. This is needed for construction and destruction of the
+  // entire array regardless of how many dimensions it has. For all other
+  // cases, StorageElement is just an alias of value_type.
+  //
+  // Maintainer's Note: The simpler solution would be to simply wrap value_type
+  // in a struct whether it's an array or not. That causes some paranoid
+  // diagnostics to misfire, believing that 'data()' returns a pointer to a
+  // single element, rather than the packed array that it really is.
+  // e.g.:
+  //
+  //     FixedArray<char> buf(1);
+  //     sprintf(buf.data(), "foo");
+  //
+  //     error: call to int __builtin___sprintf_chk(etc...)
+  //     will always overflow destination buffer [-Werror]
+  //
+  template <typename OuterT, typename InnerT = absl::remove_extent_t<OuterT>,
+            size_t InnerN = std::extent<OuterT>::value>
+  struct StorageElementWrapper {
+    InnerT array[InnerN];
+  };
+
+  using StorageElement =
+      absl::conditional_t<std::is_array<value_type>::value,
+                          StorageElementWrapper<value_type>, value_type>;
+
+  static pointer AsValueType(pointer ptr) { return ptr; }
+  static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
+    return std::addressof(ptr->array);
+  }
+
+  static_assert(sizeof(StorageElement) == sizeof(value_type), "");
+  static_assert(alignof(StorageElement) == alignof(value_type), "");
+
+  class NonEmptyInlinedStorage {
+   public:
+    StorageElement* data() { return reinterpret_cast<StorageElement*>(buff_); }
+    void AnnotateConstruct(size_type n);
+    void AnnotateDestruct(size_type n);
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+    void* RedzoneBegin() { return &redzone_begin_; }
+    void* RedzoneEnd() { return &redzone_end_ + 1; }
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+
+   private:
+    ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+    alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
+    ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
+  };
+
+  class EmptyInlinedStorage {
+   public:
+    StorageElement* data() { return nullptr; }
+    void AnnotateConstruct(size_type) {}
+    void AnnotateDestruct(size_type) {}
+  };
+
+  using InlinedStorage =
+      absl::conditional_t<inline_elements == 0, EmptyInlinedStorage,
+                          NonEmptyInlinedStorage>;
+
+  // Storage
+  //
+  // An instance of Storage manages the inline and out-of-line memory for
+  // instances of FixedArray. This guarantees that even when construction of
+  // individual elements fails in the FixedArray constructor body, the
+  // destructor for Storage will still be called and out-of-line memory will be
+  // properly deallocated.
+  //
+  class Storage : public InlinedStorage {
+   public:
+    Storage(size_type n, const allocator_type& a)
+        : size_alloc_(n, a), data_(InitializeData()) {}
+
+    ~Storage() noexcept {
+      if (UsingInlinedStorage(size())) {
+        InlinedStorage::AnnotateDestruct(size());
+      } else {
+        AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size());
+      }
+    }
+
+    size_type size() const { return size_alloc_.template get<0>(); }
+    StorageElement* begin() const { return data_; }
+    StorageElement* end() const { return begin() + size(); }
+    allocator_type& alloc() { return size_alloc_.template get<1>(); }
+    const allocator_type& alloc() const {
+      return size_alloc_.template get<1>();
+    }
+
+   private:
+    static bool UsingInlinedStorage(size_type n) {
+      return n <= inline_elements;
+    }
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+    ABSL_ATTRIBUTE_NOINLINE
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+    StorageElement* InitializeData() {
+      if (UsingInlinedStorage(size())) {
+        InlinedStorage::AnnotateConstruct(size());
+        return InlinedStorage::data();
+      } else {
+        return reinterpret_cast<StorageElement*>(
+            AllocatorTraits::allocate(alloc(), size()));
+      }
+    }
+
+    // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s
+    container_internal::CompressedTuple<size_type, allocator_type> size_alloc_;
+    StorageElement* data_;
+  };
+
+  Storage storage_;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+template <typename T, size_t N, typename A>
+constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
+
+template <typename T, size_t N, typename A>
+constexpr typename FixedArray<T, N, A>::size_type
+    FixedArray<T, N, A>::inline_elements;
+#endif
+
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
+    typename FixedArray<T, N, A>::size_type n) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+  if (!n) return;
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
+                                     data() + n);
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
+                                     RedzoneBegin());
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+  static_cast<void>(n);  // Mark used when not in asan mode
+}
+
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
+    typename FixedArray<T, N, A>::size_type n) {
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+  if (!n) return;
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
+                                     RedzoneEnd());
+  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
+                                     data());
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+  static_cast<void>(n);  // Mark used when not in asan mode
+}
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_FIXED_ARRAY_H_
diff --git a/third_party/absl/absl/container/fixed_array_benchmark.cc b/third_party/absl/absl/container/fixed_array_benchmark.cc
new file mode 100644
index 000000000..db6663e60
--- /dev/null
+++ b/third_party/absl/absl/container/fixed_array_benchmark.cc
@@ -0,0 +1,67 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "absl/container/fixed_array.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+// For benchmarking -- simple class with constructor and destructor that
+// set an int to a constant.
+class SimpleClass {
+ public:
+  SimpleClass() : i(3) {}
+  ~SimpleClass() { i = 0; }
+
+ private:
+  int i;
+};
+
+template <typename C, size_t stack_size>
+void BM_FixedArray(benchmark::State& state) {
+  const int size = state.range(0);
+  for (auto _ : state) {
+    absl::FixedArray<C, stack_size> fa(size);
+    benchmark::DoNotOptimize(fa.data());
+  }
+}
+BENCHMARK_TEMPLATE(BM_FixedArray, char, absl::kFixedArrayUseDefault)
+    ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 65536)->Range(0, 1 << 16);
+
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, absl::kFixedArrayUseDefault)
+    ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 65536)->Range(0, 1 << 16);
+
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, absl::kFixedArrayUseDefault)
+    ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 65536)->Range(0, 1 << 16);
+
+}  // namespace
diff --git a/third_party/absl/absl/container/fixed_array_exception_safety_test.cc b/third_party/absl/absl/container/fixed_array_exception_safety_test.cc
new file mode 100644
index 000000000..e5f59299b
--- /dev/null
+++ b/third_party/absl/absl/container/fixed_array_exception_safety_test.cc
@@ -0,0 +1,201 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
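+
+// Editor's sketch (not upstream text): testing::TestThrowingCtor<T>(args...)
+// from exception_safety_testing.h repeatedly constructs T(args...), forcing
+// the 1st, 2nd, ... throwing operation to throw on successive iterations, and
+// uses the ThrowingValue/ThrowingAllocator bookkeeping to fail the test if any
+// iteration leaks or double-destroys an element. Conceptually:
+//
+//   for (int countdown = 0; !constructed; ++countdown) {
+//     SetCountdown(countdown);  // the countdown-th throwing op will throw
+//     try { T t(args...); constructed = true; } catch (const TestException&) {}
+//   }  // leak/lifetime checks run on every unwind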
+ +#include "absl/base/config.h" +#include "absl/container/fixed_array.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include + +#include "gtest/gtest.h" +#include "absl/base/internal/exception_safety_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace { + +constexpr size_t kInlined = 25; +constexpr size_t kSmallSize = kInlined / 2; +constexpr size_t kLargeSize = kInlined * 2; + +constexpr int kInitialValue = 5; +constexpr int kUpdatedValue = 10; + +using ::testing::TestThrowingCtor; + +using Thrower = testing::ThrowingValue; +using ThrowAlloc = + testing::ThrowingAllocator; +using MoveThrower = testing::ThrowingValue; +using MoveThrowAlloc = + testing::ThrowingAllocator; + +using FixedArr = absl::FixedArray; +using FixedArrWithAlloc = absl::FixedArray; + +using MoveFixedArr = absl::FixedArray; +using MoveFixedArrWithAlloc = + absl::FixedArray; + +TEST(FixedArrayExceptionSafety, CopyConstructor) { + auto small = FixedArr(kSmallSize); + TestThrowingCtor(small); + + auto large = FixedArr(kLargeSize); + TestThrowingCtor(large); +} + +TEST(FixedArrayExceptionSafety, CopyConstructorWithAlloc) { + auto small = FixedArrWithAlloc(kSmallSize); + TestThrowingCtor(small); + + auto large = FixedArrWithAlloc(kLargeSize); + TestThrowingCtor(large); +} + +TEST(FixedArrayExceptionSafety, MoveConstructor) { + TestThrowingCtor(FixedArr(kSmallSize)); + TestThrowingCtor(FixedArr(kLargeSize)); + + // TypeSpec::kNoThrowMove + TestThrowingCtor(MoveFixedArr(kSmallSize)); + TestThrowingCtor(MoveFixedArr(kLargeSize)); +} + +TEST(FixedArrayExceptionSafety, MoveConstructorWithAlloc) { + TestThrowingCtor(FixedArrWithAlloc(kSmallSize)); + TestThrowingCtor(FixedArrWithAlloc(kLargeSize)); + + // TypeSpec::kNoThrowMove + TestThrowingCtor(MoveFixedArrWithAlloc(kSmallSize)); + TestThrowingCtor(MoveFixedArrWithAlloc(kLargeSize)); +} + +TEST(FixedArrayExceptionSafety, SizeConstructor) { + TestThrowingCtor(kSmallSize); + TestThrowingCtor(kLargeSize); +} + +TEST(FixedArrayExceptionSafety, SizeConstructorWithAlloc) { + TestThrowingCtor(kSmallSize); + TestThrowingCtor(kLargeSize); +} + +TEST(FixedArrayExceptionSafety, SizeValueConstructor) { + TestThrowingCtor(kSmallSize, Thrower()); + TestThrowingCtor(kLargeSize, Thrower()); +} + +TEST(FixedArrayExceptionSafety, SizeValueConstructorWithAlloc) { + TestThrowingCtor(kSmallSize, Thrower()); + TestThrowingCtor(kLargeSize, Thrower()); +} + +TEST(FixedArrayExceptionSafety, IteratorConstructor) { + auto small = FixedArr(kSmallSize); + TestThrowingCtor(small.begin(), small.end()); + + auto large = FixedArr(kLargeSize); + TestThrowingCtor(large.begin(), large.end()); +} + +TEST(FixedArrayExceptionSafety, IteratorConstructorWithAlloc) { + auto small = FixedArrWithAlloc(kSmallSize); + TestThrowingCtor(small.begin(), small.end()); + + auto large = FixedArrWithAlloc(kLargeSize); + TestThrowingCtor(large.begin(), large.end()); +} + +TEST(FixedArrayExceptionSafety, InitListConstructor) { + constexpr int small_inlined = 3; + using SmallFixedArr = absl::FixedArray; + + TestThrowingCtor(std::initializer_list{}); + // Test inlined allocation + TestThrowingCtor( + std::initializer_list{Thrower{}, Thrower{}}); + // Test out of line allocation + TestThrowingCtor(std::initializer_list{ + Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}}); +} + +TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) { + constexpr int small_inlined = 3; + using SmallFixedArrWithAlloc = + absl::FixedArray; + + TestThrowingCtor(std::initializer_list{}); + // Test inlined allocation + TestThrowingCtor( + 
std::initializer_list{Thrower{}, Thrower{}}); + // Test out of line allocation + TestThrowingCtor(std::initializer_list{ + Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}}); +} + +template +testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) { + int sum = 0; + for (const auto& thrower : *fixed_arr) { + sum += thrower.Get(); + } + return testing::AssertionSuccess() << "Values sum to [" << sum << "]"; +} + +TEST(FixedArrayExceptionSafety, Fill) { + auto test_fill = testing::MakeExceptionSafetyTester() + .WithContracts(ReadMemory) + .WithOperation([&](FixedArr* fixed_arr_ptr) { + auto thrower = + Thrower(kUpdatedValue, testing::nothrow_ctor); + fixed_arr_ptr->fill(thrower); + }); + + EXPECT_TRUE( + test_fill.WithInitialValue(FixedArr(kSmallSize, Thrower(kInitialValue))) + .Test()); + EXPECT_TRUE( + test_fill.WithInitialValue(FixedArr(kLargeSize, Thrower(kInitialValue))) + .Test()); +} + +TEST(FixedArrayExceptionSafety, FillWithAlloc) { + auto test_fill = testing::MakeExceptionSafetyTester() + .WithContracts(ReadMemory) + .WithOperation([&](FixedArrWithAlloc* fixed_arr_ptr) { + auto thrower = + Thrower(kUpdatedValue, testing::nothrow_ctor); + fixed_arr_ptr->fill(thrower); + }); + + EXPECT_TRUE(test_fill + .WithInitialValue( + FixedArrWithAlloc(kSmallSize, Thrower(kInitialValue))) + .Test()); + EXPECT_TRUE(test_fill + .WithInitialValue( + FixedArrWithAlloc(kLargeSize, Thrower(kInitialValue))) + .Test()); +} + +} // namespace + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HAVE_EXCEPTIONS diff --git a/third_party/absl/absl/container/fixed_array_test.cc b/third_party/absl/absl/container/fixed_array_test.cc new file mode 100644 index 000000000..2421b5fc8 --- /dev/null +++ b/third_party/absl/absl/container/fixed_array_test.cc @@ -0,0 +1,853 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/fixed_array.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/base/internal/exception_testing.h" +#include "absl/base/options.h" +#include "absl/container/internal/test_allocator.h" +#include "absl/hash/hash_testing.h" +#include "absl/memory/memory.h" + +using ::testing::ElementsAreArray; + +namespace { + +// Helper routine to determine if a absl::FixedArray used stack allocation. +template +static bool IsOnStack(const ArrayType& a) { + return a.size() <= ArrayType::inline_elements; +} + +class ConstructionTester { + public: + ConstructionTester() : self_ptr_(this), value_(0) { constructions++; } + ~ConstructionTester() { + assert(self_ptr_ == this); + self_ptr_ = nullptr; + destructions++; + } + + // These are incremented as elements are constructed and destructed so we can + // be sure all elements are properly cleaned up. 
+ static int constructions; + static int destructions; + + void CheckConstructed() { assert(self_ptr_ == this); } + + void set(int value) { value_ = value; } + int get() { return value_; } + + private: + // self_ptr_ should always point to 'this' -- that's how we can be sure the + // constructor has been called. + ConstructionTester* self_ptr_; + int value_; +}; + +int ConstructionTester::constructions = 0; +int ConstructionTester::destructions = 0; + +// ThreeInts will initialize its three ints to the value stored in +// ThreeInts::counter. The constructor increments counter so that each object +// in an array of ThreeInts will have different values. +class ThreeInts { + public: + ThreeInts() { + x_ = counter; + y_ = counter; + z_ = counter; + ++counter; + } + + static int counter; + + int x_, y_, z_; +}; + +int ThreeInts::counter = 0; + +TEST(FixedArrayTest, CopyCtor) { + absl::FixedArray on_stack(5); + std::iota(on_stack.begin(), on_stack.end(), 0); + absl::FixedArray stack_copy = on_stack; + EXPECT_THAT(stack_copy, ElementsAreArray(on_stack)); + EXPECT_TRUE(IsOnStack(stack_copy)); + + absl::FixedArray allocated(15); + std::iota(allocated.begin(), allocated.end(), 0); + absl::FixedArray alloced_copy = allocated; + EXPECT_THAT(alloced_copy, ElementsAreArray(allocated)); + EXPECT_FALSE(IsOnStack(alloced_copy)); +} + +TEST(FixedArrayTest, MoveCtor) { + absl::FixedArray, 10> on_stack(5); + for (int i = 0; i < 5; ++i) { + on_stack[i] = absl::make_unique(i); + } + + absl::FixedArray, 10> stack_copy = std::move(on_stack); + for (int i = 0; i < 5; ++i) EXPECT_EQ(*(stack_copy[i]), i); + EXPECT_EQ(stack_copy.size(), on_stack.size()); + + absl::FixedArray, 10> allocated(15); + for (int i = 0; i < 15; ++i) { + allocated[i] = absl::make_unique(i); + } + + absl::FixedArray, 10> alloced_copy = + std::move(allocated); + for (int i = 0; i < 15; ++i) EXPECT_EQ(*(alloced_copy[i]), i); + EXPECT_EQ(allocated.size(), alloced_copy.size()); +} + +TEST(FixedArrayTest, SmallObjects) { + // Small object arrays + { + // Short arrays should be on the stack + absl::FixedArray array(4); + EXPECT_TRUE(IsOnStack(array)); + } + + { + // Large arrays should be on the heap + absl::FixedArray array(1048576); + EXPECT_FALSE(IsOnStack(array)); + } + + { + // Arrays of <= default size should be on the stack + absl::FixedArray array(100); + EXPECT_TRUE(IsOnStack(array)); + } + + { + // Arrays of > default size should be on the heap + absl::FixedArray array(101); + EXPECT_FALSE(IsOnStack(array)); + } + + { + // Arrays with different size elements should use approximately + // same amount of stack space + absl::FixedArray array1(0); + absl::FixedArray array2(0); + EXPECT_LE(sizeof(array1), sizeof(array2) + 100); + EXPECT_LE(sizeof(array2), sizeof(array1) + 100); + } + + { + // Ensure that vectors are properly constructed inside a fixed array. + absl::FixedArray> array(2); + EXPECT_EQ(0, array[0].size()); + EXPECT_EQ(0, array[1].size()); + } + + { + // Regardless of absl::FixedArray implementation, check that a type with a + // low alignment requirement and a non power-of-two size is initialized + // correctly. 
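+    // (Editor's note, not upstream text: ThreeInts is 12 bytes with 4-byte
+    // alignment, so element boundaries do not coincide with 8- or 16-byte
+    // storage granules -- a useful probe for element-placement bugs.)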
+ ThreeInts::counter = 1; + absl::FixedArray array(2); + EXPECT_EQ(1, array[0].x_); + EXPECT_EQ(1, array[0].y_); + EXPECT_EQ(1, array[0].z_); + EXPECT_EQ(2, array[1].x_); + EXPECT_EQ(2, array[1].y_); + EXPECT_EQ(2, array[1].z_); + } +} + +TEST(FixedArrayTest, AtThrows) { + absl::FixedArray a = {1, 2, 3}; + EXPECT_EQ(a.at(2), 3); + ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range, + "failed bounds check"); +} + +TEST(FixedArrayTest, Hardened) { +#if !defined(NDEBUG) || ABSL_OPTION_HARDENED + absl::FixedArray a = {1, 2, 3}; + EXPECT_EQ(a[2], 3); + EXPECT_DEATH_IF_SUPPORTED(a[3], ""); + EXPECT_DEATH_IF_SUPPORTED(a[-1], ""); + + absl::FixedArray empty(0); + EXPECT_DEATH_IF_SUPPORTED(empty[0], ""); + EXPECT_DEATH_IF_SUPPORTED(empty[-1], ""); + EXPECT_DEATH_IF_SUPPORTED(empty.front(), ""); + EXPECT_DEATH_IF_SUPPORTED(empty.back(), ""); +#endif +} + +TEST(FixedArrayRelationalsTest, EqualArrays) { + for (int i = 0; i < 10; ++i) { + absl::FixedArray a1(i); + std::iota(a1.begin(), a1.end(), 0); + absl::FixedArray a2(a1.begin(), a1.end()); + + EXPECT_TRUE(a1 == a2); + EXPECT_FALSE(a1 != a2); + EXPECT_TRUE(a2 == a1); + EXPECT_FALSE(a2 != a1); + EXPECT_FALSE(a1 < a2); + EXPECT_FALSE(a1 > a2); + EXPECT_FALSE(a2 < a1); + EXPECT_FALSE(a2 > a1); + EXPECT_TRUE(a1 <= a2); + EXPECT_TRUE(a1 >= a2); + EXPECT_TRUE(a2 <= a1); + EXPECT_TRUE(a2 >= a1); + } +} + +TEST(FixedArrayRelationalsTest, UnequalArrays) { + for (int i = 1; i < 10; ++i) { + absl::FixedArray a1(i); + std::iota(a1.begin(), a1.end(), 0); + absl::FixedArray a2(a1.begin(), a1.end()); + --a2[i / 2]; + + EXPECT_FALSE(a1 == a2); + EXPECT_TRUE(a1 != a2); + EXPECT_FALSE(a2 == a1); + EXPECT_TRUE(a2 != a1); + EXPECT_FALSE(a1 < a2); + EXPECT_TRUE(a1 > a2); + EXPECT_TRUE(a2 < a1); + EXPECT_FALSE(a2 > a1); + EXPECT_FALSE(a1 <= a2); + EXPECT_TRUE(a1 >= a2); + EXPECT_TRUE(a2 <= a1); + EXPECT_FALSE(a2 >= a1); + } +} + +template +static void TestArray(int n) { + SCOPED_TRACE(n); + SCOPED_TRACE(stack_elements); + ConstructionTester::constructions = 0; + ConstructionTester::destructions = 0; + { + absl::FixedArray array(n); + + EXPECT_THAT(array.size(), n); + EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n); + EXPECT_THAT(array.begin() + n, array.end()); + + // Check that all elements were constructed + for (int i = 0; i < n; i++) { + array[i].CheckConstructed(); + } + // Check that no other elements were constructed + EXPECT_THAT(ConstructionTester::constructions, n); + + // Test operator[] + for (int i = 0; i < n; i++) { + array[i].set(i); + } + for (int i = 0; i < n; i++) { + EXPECT_THAT(array[i].get(), i); + EXPECT_THAT(array.data()[i].get(), i); + } + + // Test data() + for (int i = 0; i < n; i++) { + array.data()[i].set(i + 1); + } + for (int i = 0; i < n; i++) { + EXPECT_THAT(array[i].get(), i + 1); + EXPECT_THAT(array.data()[i].get(), i + 1); + } + } // Close scope containing 'array'. + + // Check that all constructed elements were destructed. + EXPECT_EQ(ConstructionTester::constructions, + ConstructionTester::destructions); +} + +template +static void TestArrayOfArrays(int n) { + SCOPED_TRACE(n); + SCOPED_TRACE(inline_elements); + SCOPED_TRACE(elements_per_inner_array); + ConstructionTester::constructions = 0; + ConstructionTester::destructions = 0; + { + using InnerArray = ConstructionTester[elements_per_inner_array]; + // Heap-allocate the FixedArray to avoid blowing the stack frame. 
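+    // (Editor's note, not upstream text: in the largest instantiation below,
+    // inline_elements == 1000 and elements_per_inner_array == 1000, so the
+    // inline buffer alone would be roughly 1000 * 1000 *
+    // sizeof(ConstructionTester) bytes -- far beyond a typical 1-8 MiB thread
+    // stack, hence the unique_ptr indirection.)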
+ auto array_ptr = + absl::make_unique>(n); + auto& array = *array_ptr; + + ASSERT_EQ(array.size(), n); + ASSERT_EQ(array.memsize(), + sizeof(ConstructionTester) * elements_per_inner_array * n); + ASSERT_EQ(array.begin() + n, array.end()); + + // Check that all elements were constructed + for (int i = 0; i < n; i++) { + for (int j = 0; j < elements_per_inner_array; j++) { + (array[i])[j].CheckConstructed(); + } + } + // Check that no other elements were constructed + ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array); + + // Test operator[] + for (int i = 0; i < n; i++) { + for (int j = 0; j < elements_per_inner_array; j++) { + (array[i])[j].set(i * elements_per_inner_array + j); + } + } + for (int i = 0; i < n; i++) { + for (int j = 0; j < elements_per_inner_array; j++) { + ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j); + ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j); + } + } + + // Test data() + for (int i = 0; i < n; i++) { + for (int j = 0; j < elements_per_inner_array; j++) { + (array.data()[i])[j].set((i + 1) * elements_per_inner_array + j); + } + } + for (int i = 0; i < n; i++) { + for (int j = 0; j < elements_per_inner_array; j++) { + ASSERT_EQ((array[i])[j].get(), (i + 1) * elements_per_inner_array + j); + ASSERT_EQ((array.data()[i])[j].get(), + (i + 1) * elements_per_inner_array + j); + } + } + } // Close scope containing 'array'. + + // Check that all constructed elements were destructed. + EXPECT_EQ(ConstructionTester::constructions, + ConstructionTester::destructions); +} + +TEST(IteratorConstructorTest, NonInline) { + int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; + absl::FixedArray const fixed( + kInput, kInput + ABSL_ARRAYSIZE(kInput)); + ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size()); + for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) { + ASSERT_EQ(kInput[i], fixed[i]); + } +} + +TEST(IteratorConstructorTest, Inline) { + int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; + absl::FixedArray const fixed( + kInput, kInput + ABSL_ARRAYSIZE(kInput)); + ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size()); + for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) { + ASSERT_EQ(kInput[i], fixed[i]); + } +} + +TEST(IteratorConstructorTest, NonPod) { + char const* kInput[] = {"red", "orange", "yellow", "green", + "blue", "indigo", "violet"}; + absl::FixedArray const fixed(kInput, + kInput + ABSL_ARRAYSIZE(kInput)); + ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size()); + for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) { + ASSERT_EQ(kInput[i], fixed[i]); + } +} + +TEST(IteratorConstructorTest, FromEmptyVector) { + std::vector const empty; + absl::FixedArray const fixed(empty.begin(), empty.end()); + EXPECT_EQ(0, fixed.size()); + EXPECT_EQ(empty.size(), fixed.size()); +} + +TEST(IteratorConstructorTest, FromNonEmptyVector) { + int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; + std::vector const items(kInput, kInput + ABSL_ARRAYSIZE(kInput)); + absl::FixedArray const fixed(items.begin(), items.end()); + ASSERT_EQ(items.size(), fixed.size()); + for (size_t i = 0; i < items.size(); ++i) { + ASSERT_EQ(items[i], fixed[i]); + } +} + +TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) { + int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; + std::list const items(kInput, kInput + ABSL_ARRAYSIZE(kInput)); + absl::FixedArray const fixed(items.begin(), items.end()); + EXPECT_THAT(fixed, testing::ElementsAreArray(kInput)); +} + +TEST(InitListConstructorTest, InitListConstruction) { + absl::FixedArray fixed = {1, 2, 3}; + 
EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3})); +} + +TEST(FillConstructorTest, NonEmptyArrays) { + absl::FixedArray stack_array(4, 1); + EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1})); + + absl::FixedArray heap_array(4, 1); + EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1})); +} + +TEST(FillConstructorTest, EmptyArray) { + absl::FixedArray empty_fill(0, 1); + absl::FixedArray empty_size(0); + EXPECT_EQ(empty_fill, empty_size); +} + +TEST(FillConstructorTest, NotTriviallyCopyable) { + std::string str = "abcd"; + absl::FixedArray strings = {str, str, str, str}; + + absl::FixedArray array(4, str); + EXPECT_EQ(array, strings); +} + +TEST(FillConstructorTest, Disambiguation) { + absl::FixedArray a(1, 2); + EXPECT_THAT(a, testing::ElementsAre(2)); +} + +TEST(FixedArrayTest, ManySizedArrays) { + std::vector sizes; + for (int i = 1; i < 100; i++) sizes.push_back(i); + for (int i = 100; i <= 1000; i += 100) sizes.push_back(i); + for (int n : sizes) { + TestArray<0>(n); + TestArray<1>(n); + TestArray<64>(n); + TestArray<1000>(n); + } +} + +TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) { + for (int n = 1; n < 1000; n++) { + ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n))); + ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n))); + ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n))); + ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n))); + } +} + +TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) { + for (int n = 1; n < 1000; n++) { + TestArrayOfArrays<2, 0>(n); + TestArrayOfArrays<2, 1>(n); + TestArrayOfArrays<2, 64>(n); + TestArrayOfArrays<2, 1000>(n); + } +} + +// If value_type is put inside of a struct container, +// we might evoke this error in a hardened build unless data() is carefully +// written, so check on that. +// error: call to int __builtin___sprintf_chk(etc...) +// will always overflow destination buffer [-Werror] +TEST(FixedArrayTest, AvoidParanoidDiagnostics) { + absl::FixedArray buf(32); + sprintf(buf.data(), "foo"); // NOLINT(runtime/printf) +} + +TEST(FixedArrayTest, TooBigInlinedSpace) { + struct TooBig { + char c[1 << 20]; + }; // too big for even one on the stack + + // Simulate the data members of absl::FixedArray, a pointer and a size_t. + struct Data { + TooBig* p; + size_t size; + }; + + // Make sure TooBig objects are not inlined for 0 or default size. + static_assert(sizeof(absl::FixedArray) == sizeof(Data), + "0-sized absl::FixedArray should have same size as Data."); + static_assert(alignof(absl::FixedArray) == alignof(Data), + "0-sized absl::FixedArray should have same alignment as Data."); + static_assert(sizeof(absl::FixedArray) == sizeof(Data), + "default-sized absl::FixedArray should have same size as Data"); + static_assert( + alignof(absl::FixedArray) == alignof(Data), + "default-sized absl::FixedArray should have same alignment as Data."); +} + +// PickyDelete EXPECTs its class-scope deallocation funcs are unused. 
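+// (Editor's note, not upstream text: FixedArray must release its out-of-line
+// buffer through std::allocator_traits<A>::deallocate rather than through a
+// `delete`/`delete[]` expression on value_type, so PickyDelete's class-scope
+// operator delete overloads must never be selected; the UsesGlobalAlloc test
+// below asserts exactly that.)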
+struct PickyDelete { + PickyDelete() {} + ~PickyDelete() {} + void operator delete(void* p) { + EXPECT_TRUE(false) << __FUNCTION__; + ::operator delete(p); + } + void operator delete[](void* p) { + EXPECT_TRUE(false) << __FUNCTION__; + ::operator delete[](p); + } +}; + +TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray a(5); } + +TEST(FixedArrayTest, Data) { + static const int kInput[] = {2, 3, 5, 7, 11, 13, 17}; + absl::FixedArray fa(std::begin(kInput), std::end(kInput)); + EXPECT_EQ(fa.data(), &*fa.begin()); + EXPECT_EQ(fa.data(), &fa[0]); + + const absl::FixedArray& cfa = fa; + EXPECT_EQ(cfa.data(), &*cfa.begin()); + EXPECT_EQ(cfa.data(), &cfa[0]); +} + +TEST(FixedArrayTest, Empty) { + absl::FixedArray empty(0); + absl::FixedArray inline_filled(1); + absl::FixedArray heap_filled(1); + EXPECT_TRUE(empty.empty()); + EXPECT_FALSE(inline_filled.empty()); + EXPECT_FALSE(heap_filled.empty()); +} + +TEST(FixedArrayTest, FrontAndBack) { + absl::FixedArray inlined = {1, 2, 3}; + EXPECT_EQ(inlined.front(), 1); + EXPECT_EQ(inlined.back(), 3); + + absl::FixedArray allocated = {1, 2, 3}; + EXPECT_EQ(allocated.front(), 1); + EXPECT_EQ(allocated.back(), 3); + + absl::FixedArray one_element = {1}; + EXPECT_EQ(one_element.front(), one_element.back()); +} + +TEST(FixedArrayTest, ReverseIteratorInlined) { + absl::FixedArray a = {0, 1, 2, 3, 4}; + + int counter = 5; + for (absl::FixedArray::reverse_iterator iter = a.rbegin(); + iter != a.rend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = 5; + for (absl::FixedArray::const_reverse_iterator iter = a.rbegin(); + iter != a.rend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = 5; + for (auto iter = a.crbegin(); iter != a.crend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); +} + +TEST(FixedArrayTest, ReverseIteratorAllocated) { + absl::FixedArray a = {0, 1, 2, 3, 4}; + + int counter = 5; + for (absl::FixedArray::reverse_iterator iter = a.rbegin(); + iter != a.rend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = 5; + for (absl::FixedArray::const_reverse_iterator iter = a.rbegin(); + iter != a.rend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); + + counter = 5; + for (auto iter = a.crbegin(); iter != a.crend(); ++iter) { + counter--; + EXPECT_EQ(counter, *iter); + } + EXPECT_EQ(counter, 0); +} + +TEST(FixedArrayTest, Fill) { + absl::FixedArray inlined(5); + int fill_val = 42; + inlined.fill(fill_val); + for (int i : inlined) EXPECT_EQ(i, fill_val); + + absl::FixedArray allocated(5); + allocated.fill(fill_val); + for (int i : allocated) EXPECT_EQ(i, fill_val); + + // It doesn't do anything, just make sure this compiles. 
+ absl::FixedArray empty(0); + empty.fill(fill_val); +} + +#ifndef __GNUC__ +TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) { + using T = char; + constexpr auto capacity = 10; + using FixedArrType = absl::FixedArray; + constexpr auto scrubbed_bits = 0x95; + constexpr auto length = capacity / 2; + + alignas(FixedArrType) unsigned char buff[sizeof(FixedArrType)]; + std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrType)); + + FixedArrType* arr = + ::new (static_cast(std::addressof(buff))) FixedArrType(length); + EXPECT_THAT(*arr, testing::Each(scrubbed_bits)); + arr->~FixedArrType(); +} +#endif // __GNUC__ + +TEST(AllocatorSupportTest, CountInlineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = absl::container_internal::CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated = 0; + int64_t active_instances = 0; + + { + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + + Alloc alloc(&allocated, &active_instances); + + AllocFxdArr arr(ia, ia + inlined_size, alloc); + static_cast(arr); + } + + EXPECT_EQ(allocated, 0); + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, CountOutoflineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = absl::container_internal::CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated = 0; + int64_t active_instances = 0; + + { + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + Alloc alloc(&allocated, &active_instances); + + AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc); + + EXPECT_EQ(allocated, arr.size() * sizeof(int)); + static_cast(arr); + } + + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, CountCopyInlineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = absl::container_internal::CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated1 = 0; + int64_t allocated2 = 0; + int64_t active_instances = 0; + Alloc alloc(&allocated1, &active_instances); + Alloc alloc2(&allocated2, &active_instances); + + { + int initial_value = 1; + + AllocFxdArr arr1(inlined_size / 2, initial_value, alloc); + + EXPECT_EQ(allocated1, 0); + + AllocFxdArr arr2(arr1, alloc2); + + EXPECT_EQ(allocated2, 0); + static_cast(arr1); + static_cast(arr2); + } + + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) { + constexpr size_t inlined_size = 4; + using Alloc = absl::container_internal::CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + int64_t allocated1 = 0; + int64_t allocated2 = 0; + int64_t active_instances = 0; + Alloc alloc(&allocated1, &active_instances); + Alloc alloc2(&allocated2, &active_instances); + + { + int initial_value = 1; + + AllocFxdArr arr1(inlined_size * 2, initial_value, alloc); + + EXPECT_EQ(allocated1, arr1.size() * sizeof(int)); + + AllocFxdArr arr2(arr1, alloc2); + + EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int)); + static_cast(arr1); + static_cast(arr2); + } + + EXPECT_EQ(active_instances, 0); +} + +TEST(AllocatorSupportTest, SizeValAllocConstructor) { + using testing::AllOf; + using testing::Each; + using testing::SizeIs; + + constexpr size_t inlined_size = 4; + using Alloc = absl::container_internal::CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + { + auto len = inlined_size / 2; + auto val = 0; + int64_t allocated = 0; + AllocFxdArr arr(len, val, Alloc(&allocated)); + + EXPECT_EQ(allocated, 0); + EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0))); + } + + { + auto len = inlined_size * 2; + auto val = 0; + int64_t 
allocated = 0; + AllocFxdArr arr(len, val, Alloc(&allocated)); + + EXPECT_EQ(allocated, len * sizeof(int)); + EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0))); + } +} + +TEST(AllocatorSupportTest, PropagatesStatefulAllocator) { + constexpr size_t inlined_size = 4; + using Alloc = absl::container_internal::CountingAllocator; + using AllocFxdArr = absl::FixedArray; + + auto len = inlined_size * 2; + auto val = 0; + int64_t allocated = 0; + AllocFxdArr arr(len, val, Alloc(&allocated)); + + EXPECT_EQ(allocated, len * sizeof(int)); + + AllocFxdArr copy = arr; + EXPECT_EQ(allocated, len * sizeof(int) * 2); +} + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +TEST(FixedArrayTest, AddressSanitizerAnnotations1) { + absl::FixedArray a(10); + int* raw = a.data(); + raw[0] = 0; + raw[9] = 0; + EXPECT_DEATH_IF_SUPPORTED(raw[-2] = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[10] = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[31] = 0, "container-overflow"); +} + +TEST(FixedArrayTest, AddressSanitizerAnnotations2) { + absl::FixedArray a(12); + char* raw = a.data(); + raw[0] = 0; + raw[11] = 0; + EXPECT_DEATH_IF_SUPPORTED(raw[-7] = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[12] = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[17] = 0, "container-overflow"); +} + +TEST(FixedArrayTest, AddressSanitizerAnnotations3) { + absl::FixedArray a(20); + uint64_t* raw = a.data(); + raw[0] = 0; + raw[19] = 0; + EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[20] = 0, "container-overflow"); +} + +TEST(FixedArrayTest, AddressSanitizerAnnotations4) { + absl::FixedArray a(10); + ThreeInts* raw = a.data(); + raw[0] = ThreeInts(); + raw[9] = ThreeInts(); + // Note: raw[-1] is pointing to 12 bytes before the container range. However, + // there is only a 8-byte red zone before the container range, so we only + // access the last 4 bytes of the struct to make sure it stays within the red + // zone. + EXPECT_DEATH_IF_SUPPORTED(raw[-1].z_ = 0, "container-overflow"); + EXPECT_DEATH_IF_SUPPORTED(raw[10] = ThreeInts(), "container-overflow"); + // The actual size of storage is kDefaultBytes=256, 21*12 = 252, + // so reading raw[21] should still trigger the correct warning. + EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow"); +} +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +TEST(FixedArrayTest, AbslHashValueWorks) { + using V = absl::FixedArray; + std::vector cases; + + // Generate a variety of vectors some of these are small enough for the inline + // space but are stored out of line. + for (int i = 0; i < 10; ++i) { + V v(i); + for (int j = 0; j < i; ++j) { + v[j] = j; + } + cases.push_back(v); + } + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); +} + +} // namespace diff --git a/third_party/absl/absl/container/flat_hash_map.h b/third_party/absl/absl/container/flat_hash_map.h new file mode 100644 index 000000000..acd013b02 --- /dev/null +++ b/third_party/absl/absl/container/flat_hash_map.h @@ -0,0 +1,617 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: flat_hash_map.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_map<K, V>` is an unordered associative container of
+// unique keys and associated values designed to be a more efficient replacement
+// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
+// deletion of map elements can be done as an `O(1)` operation. However,
+// `flat_hash_map` (and other unordered associative containers known as the
+// collection of Abseil "Swiss tables") contain other optimizations that result
+// in both memory and computation advantages.
+//
+// In most cases, your default choice for a hash map should be a map of type
+// `flat_hash_map`.
+
+#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
+#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
+
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <class K, class V>
+struct FlatHashMapPolicy;
+}  // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::flat_hash_map
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Requires values that are MoveConstructible
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//   `insert()`, provided that the map is provided a compatible heterogeneous
+//   hashing function and equality operator.
+// * Invalidates any references and pointers to elements within the table after
+//   `rehash()` and when the table is moved.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_map` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `flat_hash_map`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values
+// may be randomized across dynamically loaded libraries.
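+//
+// (Editor's illustration, not upstream text: heterogeneous lookup means a
+// `std::string`-keyed map can be queried with an `absl::string_view` without
+// materializing a temporary `std::string` key:
+//
+//   absl::flat_hash_map<std::string, int> m = {{"hello", 1}};
+//   absl::string_view key = "hello";
+//   auto it = m.find(key);  // no std::string temporary is constructed
+// )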
+//
+// NOTE: A `flat_hash_map` stores its value types directly inside its
+// implementation array to avoid memory indirection. Because a `flat_hash_map`
+// is designed to move data when rehashed, map values will not retain pointer
+// stability. If you require pointer stability, or if your values are large,
+// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
+// If your types are not moveable or you require pointer stability for keys,
+// consider `absl::node_hash_map`.
+//
+// Example:
+//
+//   // Create a flat hash map of three strings (that map to strings)
+//   absl::flat_hash_map<std::string, std::string> ducks =
+//     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+//   // Insert a new element into the flat hash map
+//   ducks.insert({"d", "donald"});
+//
+//   // Force a rehash of the flat hash map
+//   ducks.rehash(0);
+//
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
+template <class K, class V,
+          class Hash = absl::container_internal::hash_default_hash<K>,
+          class Eq = absl::container_internal::hash_default_eq<K>,
+          class Allocator = std::allocator<std::pair<const K, V>>>
+class flat_hash_map : public absl::container_internal::raw_hash_map<
+                          absl::container_internal::FlatHashMapPolicy<K, V>,
+                          Hash, Eq, Allocator> {
+  using Base = typename flat_hash_map::raw_hash_map;
+
+ public:
+  // Constructors and Assignment Operators
+  //
+  // A flat_hash_map supports the same overload set as `std::unordered_map`
+  // for construction and assignment:
+  //
+  // * Default constructor
+  //
+  //   // No allocation for the table's elements is made.
+  //   absl::flat_hash_map<int, std::string> map1;
+  //
+  // * Initializer List constructor
+  //
+  //   absl::flat_hash_map<int, std::string> map2 =
+  //       {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+  //
+  // * Copy constructor
+  //
+  //   absl::flat_hash_map<int, std::string> map3(map2);
+  //
+  // * Copy assignment operator
+  //
+  //   // Hash functor and Comparator are copied as well
+  //   absl::flat_hash_map<int, std::string> map4;
+  //   map4 = map3;
+  //
+  // * Move constructor
+  //
+  //   // Move is guaranteed efficient
+  //   absl::flat_hash_map<int, std::string> map5(std::move(map4));
+  //
+  // * Move assignment operator
+  //
+  //   // May be efficient if allocators are compatible
+  //   absl::flat_hash_map<int, std::string> map6;
+  //   map6 = std::move(map5);
+  //
+  // * Range constructor
+  //
+  //   std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+  //   absl::flat_hash_map<int, std::string> map7(v.begin(), v.end());
+  flat_hash_map() {}
+  using Base::Base;
+
+  // flat_hash_map::begin()
+  //
+  // Returns an iterator to the beginning of the `flat_hash_map`.
+  using Base::begin;
+
+  // flat_hash_map::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `flat_hash_map`.
+  using Base::cbegin;
+
+  // flat_hash_map::cend()
+  //
+  // Returns a const iterator to the end of the `flat_hash_map`.
+  using Base::cend;
+
+  // flat_hash_map::end()
+  //
+  // Returns an iterator to the end of the `flat_hash_map`.
+  using Base::end;
+
+  // flat_hash_map::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `flat_hash_map`.
+  //
+  // NOTE: this member function is particular to `absl::flat_hash_map` and is
+  // not provided in the `std::unordered_map` API.
+  using Base::capacity;
+
+  // flat_hash_map::empty()
+  //
+  // Returns whether or not the `flat_hash_map` is empty.
+  using Base::empty;
+
+  // flat_hash_map::max_size()
+  //
+  // Returns the largest theoretical possible number of elements within a
+  // `flat_hash_map` under current memory constraints.
This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_map`. + using Base::max_size; + + // flat_hash_map::size() + // + // Returns the number of elements currently within the `flat_hash_map`. + using Base::size; + + // flat_hash_map::clear() + // + // Removes all elements from the `flat_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_map::erase() + // + // Erases elements within the `flat_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_map`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_map` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // map.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. The special case of calling + // `erase(begin(), end())` resets the reserved growth such that if + // `reserve(N)` has previously been called and there has been no intervening + // call to `clear()`, then after calling `erase(begin(), end())`, it is safe + // to assume that inserting N elements will not cause a rehash. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // flat_hash_map::insert() + // + // Inserts an element of the specified value into the `flat_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const init_type& value): + // + // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // std::pair insert(init_type&& value): + // + // Inserts a moveable value into the `flat_hash_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). 
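+  //
+  //   (Editor's illustration, not upstream text:)
+  //   absl::flat_hash_map<int, std::string> m;
+  //   auto p = m.insert({1, "one"});  // p.second == true, element inserted
+  //   p = m.insert({1, "uno"});       // p.second == false, value stays "one"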
+ // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `flat_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due + // to the insertion, all existing iterators are invalidated. Overloads are + // listed below. + // + // pair insert_or_assign(const init_type& k, T&& obj): + // pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // flat_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // pair try_emplace(const key_type& k, Args&&... args): + // pair try_emplace(key_type&& k, Args&&... 
args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // flat_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `flat_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + using Base::extract; + + // flat_hash_map::merge() + // + // Extracts elements from a given `source` flat hash map into this + // `flat_hash_map`. If the destination `flat_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_map::swap(flat_hash_map& other) + // + // Exchanges the contents of this `flat_hash_map` with those of the `other` + // flat hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the flat hash map's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_map::rehash(count) + // + // Rehashes the `flat_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_map`, references are also + // invalidated upon a `rehash()`. 
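+  //
+  //   (Editor's illustration, not upstream text:)
+  //   absl::flat_hash_map<int, int> m;
+  //   m.reserve(1024);  // sized for 1024 elements; no rehash until then
+  //   m.rehash(0);      // forces an immediate rehash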
+ using Base::rehash; + + // flat_hash_map::reserve(count) + // + // Sets the number of slots in the `flat_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // flat_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `flat_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // flat_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `flat_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `flat_hash_map`. + using Base::count; + + // flat_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_map`. + using Base::equal_range; + + // flat_hash_map::find() + // + // Finds an element with the passed `key` within the `flat_hash_map`. + using Base::find; + + // flat_hash_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `flat_hash_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs and results in a rehashing of the container, all + // iterators are invalidated. Otherwise iterators are not affected and + // references are not invalidated. Overloads are listed below. + // + // T& operator[](const Key& key): + // + // Inserts an init_type object constructed in-place if the element with the + // given key does not exist. + // + // T& operator[](Key&& key): + // + // Inserts an init_type object constructed in-place provided that an element + // with the given key does not exist. + using Base::operator[]; + + // flat_hash_map::bucket_count() + // + // Returns the number of "buckets" within the `flat_hash_map`. Note that + // because a flat hash map contains all elements within its internal storage, + // this value simply equals the current capacity of the `flat_hash_map`. + using Base::bucket_count; + + // flat_hash_map::load_factor() + // + // Returns the current load factor of the `flat_hash_map` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // flat_hash_map::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_map`. Overloads are + // listed below. + // + // float flat_hash_map::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_map`. + // + // void flat_hash_map::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_map` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_map` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_map::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_map`. 
+  using Base::get_allocator;
+
+  // flat_hash_map::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `flat_hash_map`.
+  using Base::hash_function;
+
+  // flat_hash_map::key_eq()
+  //
+  // Returns the function used for comparing keys equality.
+  using Base::key_eq;
+};
+
+// erase_if(flat_hash_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Predicate>
+typename flat_hash_map<K, V, H, E, A>::size_type erase_if(
+    flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
+  return container_internal::EraseIf(pred, &c);
+}
+
+namespace container_internal {
+
+template <class K, class V>
+struct FlatHashMapPolicy {
+  using slot_policy = container_internal::map_slot_policy<K, V>;
+  using slot_type = typename slot_policy::slot_type;
+  using key_type = K;
+  using mapped_type = V;
+  using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    slot_policy::destroy(alloc, slot);
+  }
+
+  template <class Allocator>
+  static auto transfer(Allocator* alloc, slot_type* new_slot,
+                       slot_type* old_slot) {
+    return slot_policy::transfer(alloc, new_slot, old_slot);
+  }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposePair(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposePair(std::forward<F>(f),
+                                                   std::forward<Args>(args)...);
+  }
+
+  static size_t space_used(const slot_type*) { return 0; }
+
+  static std::pair<const K, V>& element(slot_type* slot) { return slot->value; }
+
+  static V& value(std::pair<const K, V>* kv) { return kv->second; }
+  static const V& value(const std::pair<const K, V>* kv) { return kv->second; }
+};
+
+}  // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    absl::flat_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+}  // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_FLAT_HASH_MAP_H_
diff --git a/third_party/absl/absl/container/flat_hash_map_test.cc b/third_party/absl/absl/container/flat_hash_map_test.cc
new file mode 100644
index 000000000..d90fe9d51
--- /dev/null
+++ b/third_party/absl/absl/container/flat_hash_map_test.cc
@@ -0,0 +1,357 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "absl/container/flat_hash_map.h" + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/unordered_map_constructor_test.h" +#include "absl/container/internal/unordered_map_lookup_test.h" +#include "absl/container/internal/unordered_map_members_test.h" +#include "absl/container/internal/unordered_map_modifiers_test.h" +#include "absl/log/check.h" +#include "absl/meta/type_traits.h" +#include "absl/types/any.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { +using ::absl::container_internal::hash_internal::Enum; +using ::absl::container_internal::hash_internal::EnumClass; +using ::testing::_; +using ::testing::IsEmpty; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +// Check that absl::flat_hash_map works in a global constructor. +struct BeforeMain { + BeforeMain() { + absl::flat_hash_map x; + x.insert({1, 1}); + CHECK(x.find(0) == x.end()) << "x should not contain 0"; + auto it = x.find(1); + CHECK(it != x.end()) << "x should contain 1"; + CHECK(it->second) << "1 should map to 1"; + } +}; +const BeforeMain before_main; + +template +using Map = flat_hash_map>>; + +static_assert(!std::is_standard_layout(), ""); + +using MapTypes = + ::testing::Types, Map, + Map, Map, + Map, Map>; + +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, LookupTest, MapTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, MembersTest, MapTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ModifiersTest, MapTypes); + +using UniquePtrMapTypes = ::testing::Types>>; + +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, UniquePtrModifiersTest, + UniquePtrMapTypes); + +TEST(FlatHashMap, StandardLayout) { + struct Int { + explicit Int(size_t value) : value(value) {} + Int() : value(0) { ADD_FAILURE(); } + Int(const Int& other) : value(other.value) { ADD_FAILURE(); } + Int(Int&&) = default; + bool operator==(const Int& other) const { return value == other.value; } + size_t value; + }; + static_assert(std::is_standard_layout(), ""); + + struct Hash { + size_t operator()(const Int& obj) const { return obj.value; } + }; + + // Verify that neither the key nor the value get default-constructed or + // copy-constructed. + { + flat_hash_map m; + m.try_emplace(Int(1), Int(2)); + m.try_emplace(Int(3), Int(4)); + m.erase(Int(1)); + m.rehash(2 * m.bucket_count()); + } + { + flat_hash_map m; + m.try_emplace(Int(1), Int(2)); + m.try_emplace(Int(3), Int(4)); + m.erase(Int(1)); + m.clear(); + } +} + +TEST(FlatHashMap, Relocatability) { + static_assert(absl::is_trivially_relocatable::value, ""); + static_assert( + absl::is_trivially_relocatable>::value, ""); + static_assert( + std::is_same::transfer>(nullptr, + nullptr, + nullptr)), + std::true_type>::value, + ""); + + struct NonRelocatable { + NonRelocatable() = default; + NonRelocatable(NonRelocatable&&) {} + NonRelocatable& operator=(NonRelocatable&&) { return *this; } + void* self = nullptr; + }; + + EXPECT_FALSE(absl::is_trivially_relocatable::value); + EXPECT_TRUE( + (std::is_same:: + transfer>(nullptr, nullptr, + nullptr)), + std::false_type>::value)); +} + +// gcc becomes unhappy if this is inside the method, so pull it out here. +struct balast {}; + +TEST(FlatHashMap, IteratesMsan) { + // Because SwissTable randomizes on pointer addresses, we keep old tables + // around to ensure we don't reuse old memory. 
+ std::vector> garbage; + for (int i = 0; i < 100; ++i) { + absl::flat_hash_map t; + for (int j = 0; j < 100; ++j) { + t[j]; + for (const auto& p : t) EXPECT_THAT(p, Pair(_, _)); + } + garbage.push_back(std::move(t)); + } +} + +// Demonstration of the "Lazy Key" pattern. This uses heterogeneous insert to +// avoid creating expensive key elements when the item is already present in the +// map. +struct LazyInt { + explicit LazyInt(size_t value, int* tracker) + : value(value), tracker(tracker) {} + + explicit operator size_t() const { + ++*tracker; + return value; + } + + size_t value; + int* tracker; +}; + +struct Hash { + using is_transparent = void; + int* tracker; + size_t operator()(size_t obj) const { + ++*tracker; + return obj; + } + size_t operator()(const LazyInt& obj) const { + ++*tracker; + return obj.value; + } +}; + +struct Eq { + using is_transparent = void; + bool operator()(size_t lhs, size_t rhs) const { return lhs == rhs; } + bool operator()(size_t lhs, const LazyInt& rhs) const { + return lhs == rhs.value; + } +}; + +TEST(FlatHashMap, LazyKeyPattern) { + // hashes are only guaranteed in opt mode, we use assertions to track internal + // state that can cause extra calls to hash. + int conversions = 0; + int hashes = 0; + flat_hash_map m(0, Hash{&hashes}); + m.reserve(3); + + m[LazyInt(1, &conversions)] = 1; + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1))); + EXPECT_EQ(conversions, 1); +#ifdef NDEBUG + EXPECT_EQ(hashes, 1); +#endif + + m[LazyInt(1, &conversions)] = 2; + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2))); + EXPECT_EQ(conversions, 1); +#ifdef NDEBUG + EXPECT_EQ(hashes, 2); +#endif + + m.try_emplace(LazyInt(2, &conversions), 3); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); + EXPECT_EQ(conversions, 2); +#ifdef NDEBUG + EXPECT_EQ(hashes, 3); +#endif + + m.try_emplace(LazyInt(2, &conversions), 4); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); + EXPECT_EQ(conversions, 2); +#ifdef NDEBUG + EXPECT_EQ(hashes, 4); +#endif +} + +TEST(FlatHashMap, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + flat_hash_map m; + m.erase(n); + m.count(n); + m.prefetch(n); + m.find(n); + m.contains(n); + m.equal_range(n); + m.insert_or_assign(n, n); + m.insert_or_assign(m.end(), n, n); + m.try_emplace(n); + m.try_emplace(m.end(), n); + m.at(n); + m[n]; +} + +TEST(FlatHashMap, MergeExtractInsert) { + // We can't test mutable keys, or non-copyable keys with flat_hash_map. + // Test that the nodes have the proper API. + absl::flat_hash_map m = {{1, 7}, {2, 9}}; + auto node = m.extract(1); + EXPECT_TRUE(node); + EXPECT_EQ(node.key(), 1); + EXPECT_EQ(node.mapped(), 7); + EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9))); + + node.mapped() = 17; + m.insert(std::move(node)); + EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9))); +} + +bool FirstIsEven(std::pair p) { return p.first % 2 == 0; } + +TEST(FlatHashMap, EraseIf) { + // Erase all elements. + { + flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + EXPECT_EQ(erase_if(s, [](std::pair) { return true; }), 5); + EXPECT_THAT(s, IsEmpty()); + } + // Erase no elements. + { + flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + EXPECT_EQ(erase_if(s, [](std::pair) { return false; }), 0); + EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), + Pair(4, 4), Pair(5, 5))); + } + // Erase specific elements. 
+ { + flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + EXPECT_EQ(erase_if(s, + [](std::pair kvp) { + return kvp.first % 2 == 1; + }), + 3); + EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4))); + } + // Predicate is function reference. + { + flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + EXPECT_EQ(erase_if(s, FirstIsEven), 2); + EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); + } + // Predicate is function pointer. + { + flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + EXPECT_EQ(erase_if(s, &FirstIsEven), 2); + EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); + } +} + +// This test requires std::launder for mutable key access in node handles. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 +TEST(FlatHashMap, NodeHandleMutableKeyAccess) { + flat_hash_map map; + + map["key1"] = "mapped"; + + auto nh = map.extract(map.begin()); + nh.key().resize(3); + map.insert(std::move(nh)); + + EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped"))); +} +#endif + +TEST(FlatHashMap, Reserve) { + // Verify that if we reserve(size() + n) then we can perform n insertions + // without a rehash, i.e., without invalidating any references. + for (size_t trial = 0; trial < 20; ++trial) { + for (size_t initial = 3; initial < 100; ++initial) { + // Fill in `initial` entries, then erase 2 of them, then reserve space for + // two inserts and check for reference stability while doing the inserts. + flat_hash_map map; + for (size_t i = 0; i < initial; ++i) { + map[i] = i; + } + map.erase(0); + map.erase(1); + map.reserve(map.size() + 2); + size_t& a2 = map[2]; + // In the event of a failure, asan will complain in one of these two + // assignments. + map[initial] = a2; + map[initial + 1] = a2; + // Fail even when not under asan: + size_t& a2new = map[2]; + EXPECT_EQ(&a2, &a2new); + } + } +} + +TEST(FlatHashMap, RecursiveTypeCompiles) { + struct RecursiveType { + flat_hash_map m; + }; + RecursiveType t; + t.m[0] = RecursiveType{}; +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/flat_hash_set.h b/third_party/absl/absl/container/flat_hash_set.h new file mode 100644 index 000000000..a94a82a0b --- /dev/null +++ b/third_party/absl/absl/container/flat_hash_set.h @@ -0,0 +1,507 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_set.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_set` is an unordered associative container designed to +// be a more efficient replacement for `std::unordered_set`. Like +// `unordered_set`, search, insertion, and deletion of set elements can be done +// as an `O(1)` operation. 
However, `flat_hash_set` (and other unordered +// associative containers known as the collection of Abseil "Swiss tables") +// contain other optimizations that result in both memory and computation +// advantages. +// +// In most cases, your default choice for a hash set should be a set of type +// `flat_hash_set`. +#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_ +#define ABSL_CONTAINER_FLAT_HASH_SET_H_ + +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +template +struct FlatHashSetPolicy; +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// absl::flat_hash_set +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_set` is an unordered associative container which has +// been optimized for both speed and memory footprint in most common use cases. +// Its interface is similar to that of `std::unordered_set` with the +// following notable differences: +// +// * Requires keys that are CopyConstructible +// * Supports heterogeneous lookup, through `find()` and `insert()`, provided +// that the set is provided a compatible heterogeneous hashing function and +// equality operator. +// * Invalidates any references and pointers to elements within the table after +// `rehash()` and when the table is moved. +// * Contains a `capacity()` member function indicating the number of element +// slots (open, deleted, and empty) within the hash set. +// * Returns `void` from the `erase(iterator)` overload. +// +// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All +// fundamental and Abseil types that support the `absl::Hash` framework have a +// compatible equality operator for comparing insertions into `flat_hash_set`. +// If your type is not yet supported by the `absl::Hash` framework, see +// absl/hash/hash.h for information on extending Abseil hashing to user-defined +// types. +// +// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded +// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may +// be randomized across dynamically loaded libraries. +// +// NOTE: A `flat_hash_set` stores its keys directly inside its implementation +// array to avoid memory indirection. Because a `flat_hash_set` is designed to +// move data when rehashed, set keys will not retain pointer stability. If you +// require pointer stability, consider using +// `absl::flat_hash_set>`. If your type is not moveable and +// you require pointer stability, consider `absl::node_hash_set` instead. +// +// Example: +// +// // Create a flat hash set of three strings +// absl::flat_hash_set ducks = +// {"huey", "dewey", "louie"}; +// +// // Insert a new element into the flat hash set +// ducks.insert("donald"); +// +// // Force a rehash of the flat hash set +// ducks.rehash(0); +// +// // See if "dewey" is present +// if (ducks.contains("dewey")) { +// std::cout << "We found dewey!" 
<< std::endl; +// } +template , + class Eq = absl::container_internal::hash_default_eq, + class Allocator = std::allocator> +class flat_hash_set + : public absl::container_internal::raw_hash_set< + absl::container_internal::FlatHashSetPolicy, Hash, Eq, Allocator> { + using Base = typename flat_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_set supports the same overload set as `std::unordered_set` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::flat_hash_set set1; + // + // * Initializer List constructor + // + // absl::flat_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::flat_hash_set set7(v.begin(), v.end()); + flat_hash_set() {} + using Base::Base; + + // flat_hash_set::begin() + // + // Returns an iterator to the beginning of the `flat_hash_set`. + using Base::begin; + + // flat_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_set`. + using Base::cbegin; + + // flat_hash_set::cend() + // + // Returns a const iterator to the end of the `flat_hash_set`. + using Base::cend; + + // flat_hash_set::end() + // + // Returns an iterator to the end of the `flat_hash_set`. + using Base::end; + + // flat_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_set`. + // + // NOTE: this member function is particular to `absl::flat_hash_set` and is + // not provided in the `std::unordered_set` API. + using Base::capacity; + + // flat_hash_set::empty() + // + // Returns whether or not the `flat_hash_set` is empty. + using Base::empty; + + // flat_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_set`. + using Base::max_size; + + // flat_hash_set::size() + // + // Returns the number of elements currently within the `flat_hash_set`. + using Base::size; + + // flat_hash_set::clear() + // + // Removes all elements from the `flat_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_set::erase() + // + // Erases elements within the `flat_hash_set`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_set`, returning + // `void`. 
+ // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_set` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // set.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. The special case of calling + // `erase(begin(), end())` resets the reserved growth such that if + // `reserve(N)` has previously been called and there has been no intervening + // call to `clear()`, then after calling `erase(begin(), end())`, it is safe + // to assume that inserting N elements will not cause a rehash. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // flat_hash_set::insert() + // + // Inserts an element of the specified value into the `flat_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `flat_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. 
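+  //
+  // A usage sketch (illustrative; not part of the upstream comment):
+  //
+  //   absl::flat_hash_set<std::string> set;
+  //   set.emplace("huey");                         // key constructed in place
+  //   bool inserted = set.emplace("huey").second;  // false: already present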
+ using Base::emplace; + + // flat_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `flat_hash_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. + using Base::extract; + + // flat_hash_set::merge() + // + // Extracts elements from a given `source` flat hash set into this + // `flat_hash_set`. If the destination `flat_hash_set` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_set::swap(flat_hash_set& other) + // + // Exchanges the contents of this `flat_hash_set` with those of the `other` + // flat hash set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the flat hash set's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the set's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_set::rehash(count) + // + // Rehashes the `flat_hash_set`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_set`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // flat_hash_set::reserve(count) + // + // Sets the number of slots in the `flat_hash_set` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_set::contains() + // + // Determines whether an element comparing equal to the given `key` exists + // within the `flat_hash_set`, returning `true` if so or `false` otherwise. 
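+  //
+  // A usage sketch (illustrative; not part of the upstream comment):
+  //
+  //   absl::flat_hash_set<std::string> ducks = {"huey", "dewey"};
+  //   bool b = ducks.contains("dewey");  // true; heterogeneous lookup avoids
+  //                                      // constructing a std::string key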
+  using Base::contains;
+
+  // flat_hash_set::count(const Key& key) const
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `flat_hash_set`. Note that this function will return either `1` or `0`
+  // since duplicate elements are not allowed within a `flat_hash_set`.
+  using Base::count;
+
+  // flat_hash_set::equal_range()
+  //
+  // Returns a closed range [first, last], defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `flat_hash_set`.
+  using Base::equal_range;
+
+  // flat_hash_set::find()
+  //
+  // Finds an element with the passed `key` within the `flat_hash_set`.
+  using Base::find;
+
+  // flat_hash_set::bucket_count()
+  //
+  // Returns the number of "buckets" within the `flat_hash_set`. Note that
+  // because a flat hash set contains all elements within its internal storage,
+  // this value simply equals the current capacity of the `flat_hash_set`.
+  using Base::bucket_count;
+
+  // flat_hash_set::load_factor()
+  //
+  // Returns the current load factor of the `flat_hash_set` (the average number
+  // of slots occupied with a value within the hash set).
+  using Base::load_factor;
+
+  // flat_hash_set::max_load_factor()
+  //
+  // Manages the maximum load factor of the `flat_hash_set`. Overloads are
+  // listed below.
+  //
+  // float flat_hash_set::max_load_factor()
+  //
+  // Returns the current maximum load factor of the `flat_hash_set`.
+  //
+  // void flat_hash_set::max_load_factor(float ml)
+  //
+  // Sets the maximum load factor of the `flat_hash_set` to the passed value.
+  //
+  // NOTE: This overload is provided only for API compatibility with the STL;
+  // `flat_hash_set` will ignore any set load factor and manage its rehashing
+  // internally as an implementation detail.
+  using Base::max_load_factor;
+
+  // flat_hash_set::get_allocator()
+  //
+  // Returns the allocator function associated with this `flat_hash_set`.
+  using Base::get_allocator;
+
+  // flat_hash_set::hash_function()
+  //
+  // Returns the hashing function used to hash the keys within this
+  // `flat_hash_set`.
+  using Base::hash_function;
+
+  // flat_hash_set::key_eq()
+  //
+  // Returns the function used for comparing keys for equality.
+  using Base::key_eq;
+};
+
+// erase_if(flat_hash_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
+template <typename T, typename H, typename E, typename A, typename Predicate>
+typename flat_hash_set<T, H, E, A>::size_type erase_if(
+    flat_hash_set<T, H, E, A>& c, Predicate pred) {
+  return container_internal::EraseIf(pred, &c);
+}
+
+namespace container_internal {
+
+template <class T>
+struct FlatHashSetPolicy {
+  using slot_type = T;
+  using key_type = T;
+  using init_type = T;
+  using constant_iterators = std::true_type;
+
+  template <class Allocator, class... Args>
+  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+    absl::allocator_traits<Allocator>::construct(*alloc, slot,
+                                                 std::forward<Args>(args)...);
+  }
+
+  template <class Allocator>
+  static void destroy(Allocator* alloc, slot_type* slot) {
+    absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+  }
+
+  static T& element(slot_type* slot) { return *slot; }
+
+  template <class F, class... Args>
+  static decltype(absl::container_internal::DecomposeValue(
+      std::declval<F>(), std::declval<Args>()...))
+  apply(F&& f, Args&&... args) {
+    return absl::container_internal::DecomposeValue(
+        std::forward<F>(f), std::forward<Args>(args)...);
+  }
+
+  static size_t space_used(const T*) { return 0; }
+};
+}  // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
+    : std::true_type {};
+
+}  // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_FLAT_HASH_SET_H_
diff --git a/third_party/absl/absl/container/flat_hash_set_test.cc b/third_party/absl/absl/container/flat_hash_set_test.cc
new file mode 100644
index 000000000..a60b4bf56
--- /dev/null
+++ b/third_party/absl/absl/container/flat_hash_set_test.cc
@@ -0,0 +1,243 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/flat_hash_set.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/unordered_set_constructor_test.h"
+#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
+#include "absl/container/internal/unordered_set_modifiers_test.h"
+#include "absl/log/check.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::container_internal::hash_internal::Enum;
+using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::IsEmpty;
+using ::testing::Pointee;
+using ::testing::UnorderedElementsAre;
+using ::testing::UnorderedElementsAreArray;
+
+// Check that absl::flat_hash_set works in a global constructor.
+struct BeforeMain { + BeforeMain() { + absl::flat_hash_set x; + x.insert(1); + CHECK(!x.contains(0)) << "x should not contain 0"; + CHECK(x.contains(1)) << "x should contain 1"; + } +}; +const BeforeMain before_main; + +template +using Set = + absl::flat_hash_set>; + +using SetTypes = + ::testing::Types, Set, Set, Set>; + +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ConstructorTest, SetTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, LookupTest, SetTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, MembersTest, SetTypes); +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ModifiersTest, SetTypes); + +TEST(FlatHashSet, EmplaceString) { + std::vector v = {"a", "b"}; + absl::flat_hash_set hs(v.begin(), v.end()); + EXPECT_THAT(hs, UnorderedElementsAreArray(v)); +} + +TEST(FlatHashSet, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + absl::flat_hash_set s = {n}; + s.insert(n); + s.insert(s.end(), n); + s.insert({n}); + s.erase(n); + s.count(n); + s.prefetch(n); + s.find(n); + s.contains(n); + s.equal_range(n); +} + +TEST(FlatHashSet, MergeExtractInsert) { + struct Hash { + size_t operator()(const std::unique_ptr& p) const { return *p; } + }; + struct Eq { + bool operator()(const std::unique_ptr& a, + const std::unique_ptr& b) const { + return *a == *b; + } + }; + absl::flat_hash_set, Hash, Eq> set1, set2; + set1.insert(absl::make_unique(7)); + set1.insert(absl::make_unique(17)); + + set2.insert(absl::make_unique(7)); + set2.insert(absl::make_unique(19)); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19))); + + set1.merge(set2); + + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19))); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + auto node = set1.extract(absl::make_unique(7)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(7)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19))); + + auto insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_FALSE(insert_result.inserted); + EXPECT_TRUE(insert_result.node); + EXPECT_THAT(insert_result.node.value(), Pointee(7)); + EXPECT_EQ(**insert_result.position, 7); + EXPECT_NE(insert_result.position->get(), insert_result.node.value().get()); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); + + node = set1.extract(absl::make_unique(17)); + EXPECT_TRUE(node); + EXPECT_THAT(node.value(), Pointee(17)); + EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19))); + + node.value() = absl::make_unique(23); + + insert_result = set2.insert(std::move(node)); + EXPECT_FALSE(node); + EXPECT_TRUE(insert_result.inserted); + EXPECT_FALSE(insert_result.node); + EXPECT_EQ(**insert_result.position, 23); + EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23))); +} + +bool IsEven(int k) { return k % 2 == 0; } + +TEST(FlatHashSet, EraseIf) { + // Erase all elements. + { + flat_hash_set s = {1, 2, 3, 4, 5}; + EXPECT_EQ(erase_if(s, [](int) { return true; }), 5); + EXPECT_THAT(s, IsEmpty()); + } + // Erase no elements. + { + flat_hash_set s = {1, 2, 3, 4, 5}; + EXPECT_EQ(erase_if(s, [](int) { return false; }), 0); + EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); + } + // Erase specific elements. + { + flat_hash_set s = {1, 2, 3, 4, 5}; + EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3); + EXPECT_THAT(s, UnorderedElementsAre(2, 4)); + } + // Predicate is function reference. 
+ { + flat_hash_set s = {1, 2, 3, 4, 5}; + EXPECT_EQ(erase_if(s, IsEven), 2); + EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); + } + // Predicate is function pointer. + { + flat_hash_set s = {1, 2, 3, 4, 5}; + EXPECT_EQ(erase_if(s, &IsEven), 2); + EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); + } +} + +class PoisonInline { + int64_t data_; + + public: + explicit PoisonInline(int64_t d) : data_(d) { + SanitizerPoisonObject(&data_); + } + PoisonInline(const PoisonInline& that) : PoisonInline(*that) {} + ~PoisonInline() { SanitizerUnpoisonObject(&data_); } + + int64_t operator*() const { + SanitizerUnpoisonObject(&data_); + const int64_t ret = data_; + SanitizerPoisonObject(&data_); + return ret; + } + template + friend H AbslHashValue(H h, const PoisonInline& pi) { + return H::combine(std::move(h), *pi); + } + bool operator==(const PoisonInline& rhs) const { return **this == *rhs; } +}; + +// Tests that we don't touch the poison_ member of PoisonInline. +TEST(FlatHashSet, PoisonInline) { + PoisonInline a(0), b(1); + { // basic usage + flat_hash_set set; + set.insert(a); + EXPECT_THAT(set, UnorderedElementsAre(a)); + set.insert(b); + EXPECT_THAT(set, UnorderedElementsAre(a, b)); + set.erase(a); + EXPECT_THAT(set, UnorderedElementsAre(b)); + set.rehash(0); // shrink to inline + EXPECT_THAT(set, UnorderedElementsAre(b)); + } + { // test move constructor from inline to inline + flat_hash_set set; + set.insert(a); + flat_hash_set set2(std::move(set)); + EXPECT_THAT(set2, UnorderedElementsAre(a)); + } + { // test move assignment from inline to inline + flat_hash_set set, set2; + set.insert(a); + set2 = std::move(set); + EXPECT_THAT(set2, UnorderedElementsAre(a)); + } + { // test alloc move constructor from inline to inline + flat_hash_set set; + set.insert(a); + flat_hash_set set2(std::move(set), + std::allocator()); + EXPECT_THAT(set2, UnorderedElementsAre(a)); + } +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/inlined_vector.h b/third_party/absl/absl/container/inlined_vector.h new file mode 100644 index 000000000..04e2c3856 --- /dev/null +++ b/third_party/absl/absl/container/inlined_vector.h @@ -0,0 +1,1002 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: inlined_vector.h +// ----------------------------------------------------------------------------- +// +// This header file contains the declaration and definition of an "inlined +// vector" which behaves in an equivalent fashion to a `std::vector`, except +// that storage for small sequences of the vector are provided inline without +// requiring any heap allocation. +// +// An `absl::InlinedVector` specifies the default capacity `N` as one of +// its template parameters. Instances where `size() <= N` hold contained +// elements in inline space. 
Typically `N` is very small so that sequences that +// are expected to be short do not require allocations. +// +// An `absl::InlinedVector` does not usually require a specific allocator. If +// the inlined vector grows beyond its initial constraints, it will need to +// allocate (as any normal `std::vector` would). This is usually performed with +// the default allocator (defined as `std::allocator`). Optionally, a custom +// allocator type may be specified as `A` in `absl::InlinedVector`. + +#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_ +#define ABSL_CONTAINER_INLINED_VECTOR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/container/internal/inlined_vector.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +// ----------------------------------------------------------------------------- +// InlinedVector +// ----------------------------------------------------------------------------- +// +// An `absl::InlinedVector` is designed to be a drop-in replacement for +// `std::vector` for use cases where the vector's size is sufficiently small +// that it can be inlined. If the inlined vector does grow beyond its estimated +// capacity, it will trigger an initial allocation on the heap, and will behave +// as a `std::vector`. The API of the `absl::InlinedVector` within this file is +// designed to cover the same API footprint as covered by `std::vector`. +template > +class InlinedVector { + static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); + + using Storage = inlined_vector_internal::Storage; + + template + using AllocatorTraits = inlined_vector_internal::AllocatorTraits; + template + using MoveIterator = inlined_vector_internal::MoveIterator; + template + using IsMoveAssignOk = inlined_vector_internal::IsMoveAssignOk; + + template + using IteratorValueAdapter = + inlined_vector_internal::IteratorValueAdapter; + template + using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; + template + using DefaultValueAdapter = + inlined_vector_internal::DefaultValueAdapter; + + template + using EnableIfAtLeastForwardIterator = absl::enable_if_t< + inlined_vector_internal::IsAtLeastForwardIterator::value, int>; + template + using DisableIfAtLeastForwardIterator = absl::enable_if_t< + !inlined_vector_internal::IsAtLeastForwardIterator::value, int>; + + using MemcpyPolicy = typename Storage::MemcpyPolicy; + using ElementwiseAssignPolicy = typename Storage::ElementwiseAssignPolicy; + using ElementwiseConstructPolicy = + typename Storage::ElementwiseConstructPolicy; + using MoveAssignmentPolicy = typename Storage::MoveAssignmentPolicy; + + public: + using allocator_type = A; + using value_type = inlined_vector_internal::ValueType; + using pointer = inlined_vector_internal::Pointer; + using const_pointer = inlined_vector_internal::ConstPointer; + using size_type = inlined_vector_internal::SizeType; + using difference_type = inlined_vector_internal::DifferenceType; + using reference = inlined_vector_internal::Reference; + using const_reference = inlined_vector_internal::ConstReference; + using iterator = inlined_vector_internal::Iterator; + using const_iterator = inlined_vector_internal::ConstIterator; + using reverse_iterator = inlined_vector_internal::ReverseIterator; + 
using const_reverse_iterator = + inlined_vector_internal::ConstReverseIterator; + + // --------------------------------------------------------------------------- + // InlinedVector Constructors and Destructor + // --------------------------------------------------------------------------- + + // Creates an empty inlined vector with a value-initialized allocator. + InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {} + + // Creates an empty inlined vector with a copy of `allocator`. + explicit InlinedVector(const allocator_type& allocator) noexcept + : storage_(allocator) {} + + // Creates an inlined vector with `n` copies of `value_type()`. + explicit InlinedVector(size_type n, + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(DefaultValueAdapter(), n); + } + + // Creates an inlined vector with `n` copies of `v`. + InlinedVector(size_type n, const_reference v, + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); + } + + // Creates an inlined vector with copies of the elements of `list`. + InlinedVector(std::initializer_list list, + const allocator_type& allocator = allocator_type()) + : InlinedVector(list.begin(), list.end(), allocator) {} + + // Creates an inlined vector with elements constructed from the provided + // forward iterator range [`first`, `last`). + // + // NOTE: the `enable_if` prevents ambiguous interpretation between a call to + // this constructor with two integral arguments and a call to the above + // `InlinedVector(size_type, const_reference)` constructor. + template = 0> + InlinedVector(ForwardIterator first, ForwardIterator last, + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(IteratorValueAdapter(first), + static_cast(std::distance(first, last))); + } + + // Creates an inlined vector with elements constructed from the provided input + // iterator range [`first`, `last`). + template = 0> + InlinedVector(InputIterator first, InputIterator last, + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + std::copy(first, last, std::back_inserter(*this)); + } + + // Creates an inlined vector by copying the contents of `other` using + // `other`'s allocator. + InlinedVector(const InlinedVector& other) + : InlinedVector(other, other.storage_.GetAllocator()) {} + + // Creates an inlined vector by copying the contents of `other` using the + // provided `allocator`. + InlinedVector(const InlinedVector& other, const allocator_type& allocator) + : storage_(allocator) { + // Fast path: if the other vector is empty, there's nothing for us to do. + if (other.empty()) { + return; + } + + // Fast path: if the value type is trivially copy constructible, we know the + // allocator doesn't do anything fancy, and there is nothing on the heap + // then we know it is legal for us to simply memcpy the other vector's + // inlined bytes to form our copy of its elements. + if (absl::is_trivially_copy_constructible::value && + std::is_same>::value && + !other.storage_.GetIsAllocated()) { + storage_.MemcpyFrom(other.storage_); + return; + } + + storage_.InitFrom(other.storage_); + } + + // Creates an inlined vector by moving in the contents of `other` without + // allocating. If `other` contains allocated memory, the newly-created inlined + // vector will take ownership of that memory. 
However, if `other` does not
+  // contain allocated memory, the newly-created inlined vector will perform
+  // element-wise move construction of the contents of `other`.
+  //
+  // NOTE: since no allocation is performed for the inlined vector in either
+  // case, the `noexcept(...)` specification depends on whether moving the
+  // underlying objects can throw. It is assumed that...
+  //  a) move constructors should only throw due to allocation failure.
+  //  b) if `value_type`'s move constructor allocates, it uses the same
+  //     allocation function as the inlined vector's allocator.
+  // Thus, the move constructor is non-throwing if the allocator is non-throwing
+  // or `value_type`'s move constructor is specified as `noexcept`.
+  InlinedVector(InlinedVector&& other) noexcept(
+      absl::allocator_is_nothrow<allocator_type>::value ||
+      std::is_nothrow_move_constructible<value_type>::value)
+      : storage_(other.storage_.GetAllocator()) {
+    // Fast path: if the value type can be trivially relocated (i.e. moved from
+    // and destroyed), and we know the allocator doesn't do anything fancy, then
+    // it's safe for us to simply adopt the contents of the storage for `other`
+    // and remove its own reference to them. It's as if we had individually
+    // move-constructed each value and then destroyed the original.
+    if (absl::is_trivially_relocatable<value_type>::value &&
+        std::is_same<A, std::allocator<value_type>>::value) {
+      storage_.MemcpyFrom(other.storage_);
+      other.storage_.SetInlinedSize(0);
+      return;
+    }
+
+    // Fast path: if the other vector is on the heap, we can simply take over
+    // its allocation.
+    if (other.storage_.GetIsAllocated()) {
+      storage_.SetAllocation({other.storage_.GetAllocatedData(),
+                              other.storage_.GetAllocatedCapacity()});
+      storage_.SetAllocatedSize(other.storage_.GetSize());
+
+      other.storage_.SetInlinedSize(0);
+      return;
+    }
+
+    // Otherwise we must move each element individually.
+    IteratorValueAdapter<A, MoveIterator<A>> other_values(
+        MoveIterator<A>(other.storage_.GetInlinedData()));
+
+    inlined_vector_internal::ConstructElements<A>(
+        storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+        other.storage_.GetSize());
+
+    storage_.SetInlinedSize(other.storage_.GetSize());
+  }
+
+  // Creates an inlined vector by moving in the contents of `other` with a copy
+  // of `allocator`.
+  //
+  // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other`
+  // contains allocated memory, this move constructor will still allocate. Since
+  // allocation is performed, this constructor can only be `noexcept` if the
+  // specified allocator is also `noexcept`.
+  InlinedVector(
+      InlinedVector&& other,
+      const allocator_type&
+          allocator) noexcept(absl::allocator_is_nothrow<allocator_type>::value)
+      : storage_(allocator) {
+    // Fast path: if the value type can be trivially relocated (i.e. moved from
+    // and destroyed), and we know the allocator doesn't do anything fancy, then
+    // it's safe for us to simply adopt the contents of the storage for `other`
+    // and remove its own reference to them. It's as if we had individually
+    // move-constructed each value and then destroyed the original.
+    if (absl::is_trivially_relocatable<value_type>::value &&
+        std::is_same<A, std::allocator<value_type>>::value) {
+      storage_.MemcpyFrom(other.storage_);
+      other.storage_.SetInlinedSize(0);
+      return;
+    }
+
+    // Fast path: if the other vector is on the heap and shares the same
+    // allocator, we can simply take over its allocation.
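+    // (The equality check below matters: a heap block obtained from `other`'s
+    // allocator may only be deallocated by an allocator that compares equal to
+    // it, so adopting the buffer is safe only when the allocators are equal.)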
+ if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && + other.storage_.GetIsAllocated()) { + storage_.SetAllocation({other.storage_.GetAllocatedData(), + other.storage_.GetAllocatedCapacity()}); + storage_.SetAllocatedSize(other.storage_.GetSize()); + + other.storage_.SetInlinedSize(0); + return; + } + + // Otherwise we must move each element individually. + storage_.Initialize( + IteratorValueAdapter>(MoveIterator(other.data())), + other.size()); + } + + ~InlinedVector() {} + + // --------------------------------------------------------------------------- + // InlinedVector Member Accessors + // --------------------------------------------------------------------------- + + // `InlinedVector::empty()` + // + // Returns whether the inlined vector contains no elements. + bool empty() const noexcept { return !size(); } + + // `InlinedVector::size()` + // + // Returns the number of elements in the inlined vector. + size_type size() const noexcept { return storage_.GetSize(); } + + // `InlinedVector::max_size()` + // + // Returns the maximum number of elements the inlined vector can hold. + size_type max_size() const noexcept { + // One bit of the size storage is used to indicate whether the inlined + // vector contains allocated memory. As a result, the maximum size that the + // inlined vector can express is the minimum of the limit of how many + // objects we can allocate and std::numeric_limits::max() / 2. + return (std::min)(AllocatorTraits::max_size(storage_.GetAllocator()), + (std::numeric_limits::max)() / 2); + } + + // `InlinedVector::capacity()` + // + // Returns the number of elements that could be stored in the inlined vector + // without requiring a reallocation. + // + // NOTE: for most inlined vectors, `capacity()` should be equal to the + // template parameter `N`. For inlined vectors which exceed this capacity, + // they will no longer be inlined and `capacity()` will equal the capactity of + // the allocated memory. + size_type capacity() const noexcept { + return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity() + : storage_.GetInlinedCapacity(); + } + + // `InlinedVector::data()` + // + // Returns a `pointer` to the elements of the inlined vector. This pointer + // can be used to access and modify the contained elements. + // + // NOTE: only elements within [`data()`, `data() + size()`) are valid. + pointer data() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return storage_.GetIsAllocated() ? storage_.GetAllocatedData() + : storage_.GetInlinedData(); + } + + // Overload of `InlinedVector::data()` that returns a `const_pointer` to the + // elements of the inlined vector. This pointer can be used to access but not + // modify the contained elements. + // + // NOTE: only elements within [`data()`, `data() + size()`) are valid. + const_pointer data() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return storage_.GetIsAllocated() ? storage_.GetAllocatedData() + : storage_.GetInlinedData(); + } + + // `InlinedVector::operator[](...)` + // + // Returns a `reference` to the `i`th element of the inlined vector. + reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // Overload of `InlinedVector::operator[](...)` that returns a + // `const_reference` to the `i`th element of the inlined vector. 
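+  //
+  // A usage sketch (illustrative; not part of the upstream comment):
+  //
+  //   absl::InlinedVector<int, 4> v = {1, 2, 3};
+  //   int x = v[1];  // x == 2; an out-of-range index trips
+  //                  // `ABSL_HARDENING_ASSERT` rather than throwing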
+ const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // `InlinedVector::at(...)` + // + // Returns a `reference` to the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type)` failed bounds check"); + } + return data()[i]; + } + + // Overload of `InlinedVector::at(...)` that returns a `const_reference` to + // the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type) const` failed bounds check"); + } + return data()[i]; + } + + // `InlinedVector::front()` + // + // Returns a `reference` to the first element of the inlined vector. + reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // Overload of `InlinedVector::front()` that returns a `const_reference` to + // the first element of the inlined vector. + const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // `InlinedVector::back()` + // + // Returns a `reference` to the last element of the inlined vector. + reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // Overload of `InlinedVector::back()` that returns a `const_reference` to the + // last element of the inlined vector. + const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // `InlinedVector::begin()` + // + // Returns an `iterator` to the beginning of the inlined vector. + iterator begin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); } + + // Overload of `InlinedVector::begin()` that returns a `const_iterator` to + // the beginning of the inlined vector. + const_iterator begin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return data(); + } + + // `InlinedVector::end()` + // + // Returns an `iterator` to the end of the inlined vector. + iterator end() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return data() + size(); + } + + // Overload of `InlinedVector::end()` that returns a `const_iterator` to the + // end of the inlined vector. + const_iterator end() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return data() + size(); + } + + // `InlinedVector::cbegin()` + // + // Returns a `const_iterator` to the beginning of the inlined vector. + const_iterator cbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return begin(); + } + + // `InlinedVector::cend()` + // + // Returns a `const_iterator` to the end of the inlined vector. + const_iterator cend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return end(); + } + + // `InlinedVector::rbegin()` + // + // Returns a `reverse_iterator` from the end of the inlined vector. 
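+  //
+  // A usage sketch (illustrative; not part of the upstream comment):
+  //
+  //   absl::InlinedVector<int, 4> v = {1, 2, 3};
+  //   for (auto it = v.rbegin(); it != v.rend(); ++it) {
+  //     // visits 3, then 2, then 1
+  //   }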
+ reverse_iterator rbegin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return reverse_iterator(end()); + } + + // Overload of `InlinedVector::rbegin()` that returns a + // `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator rbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return const_reverse_iterator(end()); + } + + // `InlinedVector::rend()` + // + // Returns a `reverse_iterator` from the beginning of the inlined vector. + reverse_iterator rend() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return reverse_iterator(begin()); + } + + // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator` + // from the beginning of the inlined vector. + const_reverse_iterator rend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return const_reverse_iterator(begin()); + } + + // `InlinedVector::crbegin()` + // + // Returns a `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator crbegin() const noexcept + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return rbegin(); + } + + // `InlinedVector::crend()` + // + // Returns a `const_reverse_iterator` from the beginning of the inlined + // vector. + const_reverse_iterator crend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { + return rend(); + } + + // `InlinedVector::get_allocator()` + // + // Returns a copy of the inlined vector's allocator. + allocator_type get_allocator() const { return storage_.GetAllocator(); } + + // --------------------------------------------------------------------------- + // InlinedVector Member Mutators + // --------------------------------------------------------------------------- + + // `InlinedVector::operator=(...)` + // + // Replaces the elements of the inlined vector with copies of the elements of + // `list`. + InlinedVector& operator=(std::initializer_list list) { + assign(list.begin(), list.end()); + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that replaces the elements of + // the inlined vector with copies of the elements of `other`. + InlinedVector& operator=(const InlinedVector& other) { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { + const_pointer other_data = other.data(); + assign(other_data, other_data + other.size()); + } + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that moves the elements of + // `other` into the inlined vector. + // + // NOTE: as a result of calling this overload, `other` is left in a valid but + // unspecified state. + InlinedVector& operator=(InlinedVector&& other) { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { + MoveAssignment(MoveAssignmentPolicy{}, std::move(other)); + } + + return *this; + } + + // `InlinedVector::assign(...)` + // + // Replaces the contents of the inlined vector with `n` copies of `v`. + void assign(size_type n, const_reference v) { + storage_.Assign(CopyValueAdapter(std::addressof(v)), n); + } + + // Overload of `InlinedVector::assign(...)` that replaces the contents of the + // inlined vector with copies of the elements of `list`. + void assign(std::initializer_list list) { + assign(list.begin(), list.end()); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "forward" category or better. 
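+  //
+  // A usage sketch (illustrative; not part of the upstream comment):
+  //
+  //   std::vector<int> src = {1, 2, 3};
+  //   absl::InlinedVector<int, 8> v(5, 0);
+  //   v.assign(src.begin(), src.end());  // v is now {1, 2, 3}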
+ template = 0> + void assign(ForwardIterator first, ForwardIterator last) { + storage_.Assign(IteratorValueAdapter(first), + static_cast(std::distance(first, last))); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "input" category. + template = 0> + void assign(InputIterator first, InputIterator last) { + size_type i = 0; + for (; i < size() && first != last; ++i, static_cast(++first)) { + data()[i] = *first; + } + + erase(data() + i, data() + size()); + std::copy(first, last, std::back_inserter(*this)); + } + + // `InlinedVector::resize(...)` + // + // Resizes the inlined vector to contain `n` elements. + // + // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are value-initialized. + void resize(size_type n) { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(DefaultValueAdapter(), n); + } + + // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to + // contain `n` elements. + // + // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are copied-constructed from `v`. + void resize(size_type n, const_reference v) { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(CopyValueAdapter(std::addressof(v)), n); + } + + // `InlinedVector::insert(...)` + // + // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly + // inserted element. + iterator insert(const_iterator pos, + const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return emplace(pos, v); + } + + // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using + // move semantics, returning an `iterator` to the newly inserted element. + iterator insert(const_iterator pos, + value_type&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return emplace(pos, std::move(v)); + } + + // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies + // of `v` starting at `pos`, returning an `iterator` pointing to the first of + // the newly inserted elements. + iterator insert(const_iterator pos, size_type n, + const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + if (ABSL_PREDICT_TRUE(n != 0)) { + value_type dealias = v; + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), + n); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } else { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts copies of the + // elements of `list` starting at `pos`, returning an `iterator` pointing to + // the first of the newly inserted elements. 
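+  //
+  // A usage sketch (illustrative; not part of the upstream comment):
+  //
+  //   absl::InlinedVector<int, 4> v = {1, 4};
+  //   v.insert(v.begin() + 1, {2, 3});  // v is now {1, 2, 3, 4}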
+  iterator insert(const_iterator pos, std::initializer_list<value_type> list)
+      ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return insert(pos, list.begin(), list.end());
+  }
+
+  // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+  // `last`) starting at `pos`, returning an `iterator` pointing to the first
+  // of the newly inserted elements.
+  //
+  // NOTE: this overload is for iterators that are "forward" category or better.
+  template <typename ForwardIterator,
+            EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
+  iterator insert(const_iterator pos, ForwardIterator first,
+                  ForwardIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(pos >= begin());
+    ABSL_HARDENING_ASSERT(pos <= end());
+
+    if (ABSL_PREDICT_TRUE(first != last)) {
+      return storage_.Insert(
+          pos, IteratorValueAdapter<A, ForwardIterator>(first),
+          static_cast<size_type>(std::distance(first, last)));
+    } else {
+      return const_cast<iterator>(pos);
+    }
+  }
+
+  // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+  // `last`) starting at `pos`, returning an `iterator` pointing to the first
+  // of the newly inserted elements.
+  //
+  // NOTE: this overload is for iterators that are "input" category.
+  template <typename InputIterator,
+            DisableIfAtLeastForwardIterator<InputIterator> = 0>
+  iterator insert(const_iterator pos, InputIterator first,
+                  InputIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(pos >= begin());
+    ABSL_HARDENING_ASSERT(pos <= end());
+
+    size_type index = static_cast<size_type>(std::distance(cbegin(), pos));
+    for (size_type i = index; first != last; ++i, static_cast<void>(++first)) {
+      insert(data() + i, *first);
+    }
+
+    return iterator(data() + index);
+  }
+
+  // `InlinedVector::emplace(...)`
+  //
+  // Constructs and inserts an element using `args...` in the inlined vector at
+  // `pos`, returning an `iterator` pointing to the newly emplaced element.
+  template <typename... Args>
+  iterator emplace(const_iterator pos,
+                   Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(pos >= begin());
+    ABSL_HARDENING_ASSERT(pos <= end());
+
+    value_type dealias(std::forward<Args>(args)...);
+    // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+    // It appears that GCC thinks that since `pos` is a const pointer and may
+    // point to uninitialized memory at this point, a warning should be
+    // issued. But `pos` is actually only used to compute an array index to
+    // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+    return storage_.Insert(pos,
+                           IteratorValueAdapter<A, MoveIterator<A>>(
+                               MoveIterator<A>(std::addressof(dealias))),
+                           1);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+  }
+
+  // `InlinedVector::emplace_back(...)`
+  //
+  // Constructs and inserts an element using `args...` in the inlined vector at
+  // `end()`, returning a `reference` to the newly emplaced element.
+  template <typename... Args>
+  reference emplace_back(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    return storage_.EmplaceBack(std::forward<Args>(args)...);
+  }
+
+  // `InlinedVector::push_back(...)`
+  //
+  // Inserts a copy of `v` in the inlined vector at `end()`.
+  void push_back(const_reference v) { static_cast<void>(emplace_back(v)); }
+
+  // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
+  // using move semantics.
+  void push_back(value_type&& v) {
+    static_cast<void>(emplace_back(std::move(v)));
+  }
+
+  // `InlinedVector::pop_back()`
+  //
+  // Destroys the element at `back()`, reducing the size by `1`.
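+  //
+  // NOTE: calling `pop_back()` on an empty vector is undefined behavior;
+  // hardened builds catch it via the assertion below.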
+  void pop_back() noexcept {
+    ABSL_HARDENING_ASSERT(!empty());
+
+    AllocatorTraits<A>::destroy(storage_.GetAllocator(), data() + (size() - 1));
+    storage_.SubtractSize(1);
+  }
+
+  // `InlinedVector::erase(...)`
+  //
+  // Erases the element at `pos`, returning an `iterator` pointing to where the
+  // erased element was located.
+  //
+  // NOTE: may return `end()`, which is not dereferenceable.
+  iterator erase(const_iterator pos) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(pos >= begin());
+    ABSL_HARDENING_ASSERT(pos < end());
+
+    return storage_.Erase(pos, pos + 1);
+  }
+
+  // Overload of `InlinedVector::erase(...)` that erases every element in the
+  // range [`from`, `to`), returning an `iterator` pointing to where the first
+  // erased element was located.
+  //
+  // NOTE: may return `end()`, which is not dereferenceable.
+  iterator erase(const_iterator from,
+                 const_iterator to) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+    ABSL_HARDENING_ASSERT(from >= begin());
+    ABSL_HARDENING_ASSERT(from <= to);
+    ABSL_HARDENING_ASSERT(to <= end());
+
+    if (ABSL_PREDICT_TRUE(from != to)) {
+      return storage_.Erase(from, to);
+    } else {
+      return const_cast<iterator>(from);
+    }
+  }
+
+  // `InlinedVector::clear()`
+  //
+  // Destroys all elements in the inlined vector, setting the size to `0` and
+  // deallocating any held memory.
+  void clear() noexcept {
+    inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+        storage_.GetAllocator(), data(), size());
+    storage_.DeallocateIfAllocated();
+
+    storage_.SetInlinedSize(0);
+  }
+
+  // `InlinedVector::reserve(...)`
+  //
+  // Ensures that there is enough room for at least `n` elements.
+  void reserve(size_type n) { storage_.Reserve(n); }
+
+  // `InlinedVector::shrink_to_fit()`
+  //
+  // Attempts to reduce memory usage by moving elements to (or keeping elements
+  // in) the smallest available buffer sufficient for containing `size()`
+  // elements.
+  //
+  // If `size()` is sufficiently small, the elements will be moved into (or
+  // kept in) the inlined space.
+  void shrink_to_fit() {
+    if (storage_.GetIsAllocated()) {
+      storage_.ShrinkToFit();
+    }
+  }
+
+  // `InlinedVector::swap(...)`
+  //
+  // Swaps the contents of the inlined vector with `other`.
+  void swap(InlinedVector& other) {
+    if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+      storage_.Swap(std::addressof(other.storage_));
+    }
+  }
+
+ private:
+  template <typename H, typename TheT, size_t TheN, typename TheA>
+  friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
+
+  void MoveAssignment(MemcpyPolicy, InlinedVector&& other) {
+    // Assumption check: we shouldn't be told to use memcpy to implement move
+    // assignment unless we have trivially destructible elements and an
+    // allocator that does nothing fancy.
+    static_assert(absl::is_trivially_destructible<value_type>::value, "");
+    static_assert(std::is_same<A, std::allocator<value_type>>::value, "");
+
+    // Throw away our existing heap allocation, if any. There is no need to
+    // destroy the existing elements one by one because we know they are
+    // trivially destructible.
+    storage_.DeallocateIfAllocated();
+
+    // Adopt the other vector's inline elements or heap allocation.
+    storage_.MemcpyFrom(other.storage_);
+    other.storage_.SetInlinedSize(0);
+  }
+
+  // Destroy our existing elements, if any, and adopt the heap-allocated
+  // elements of the other vector.
+  //
+  // REQUIRES: other.storage_.GetIsAllocated()
+  void DestroyExistingAndAdopt(InlinedVector&& other) {
+    ABSL_HARDENING_ASSERT(other.storage_.GetIsAllocated());
+
+    inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+        storage_.GetAllocator(), data(), size());
+    storage_.DeallocateIfAllocated();
+
+    storage_.MemcpyFrom(other.storage_);
+    other.storage_.SetInlinedSize(0);
+  }
+
+  void MoveAssignment(ElementwiseAssignPolicy, InlinedVector&& other) {
+    // Fast path: if the other vector is on the heap then we don't worry about
+    // actually move-assigning each element. Instead we only throw away our own
+    // existing elements and adopt the heap allocation of the other vector.
+    if (other.storage_.GetIsAllocated()) {
+      DestroyExistingAndAdopt(std::move(other));
+      return;
+    }
+
+    storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
+                        MoveIterator<A>(other.storage_.GetInlinedData())),
+                    other.size());
+  }
+
+  void MoveAssignment(ElementwiseConstructPolicy, InlinedVector&& other) {
+    // Fast path: if the other vector is on the heap then we don't worry about
+    // actually move-assigning each element. Instead we only throw away our own
+    // existing elements and adopt the heap allocation of the other vector.
+    if (other.storage_.GetIsAllocated()) {
+      DestroyExistingAndAdopt(std::move(other));
+      return;
+    }
+
+    inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+        storage_.GetAllocator(), data(), size());
+    storage_.DeallocateIfAllocated();
+
+    IteratorValueAdapter<A, MoveIterator<A>> other_values(
+        MoveIterator<A>(other.storage_.GetInlinedData()));
+    inlined_vector_internal::ConstructElements<A>(
+        storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+        other.storage_.GetSize());
+    storage_.SetInlinedSize(other.storage_.GetSize());
+  }
+
+  Storage storage_;
+};
+
+// -----------------------------------------------------------------------------
+// InlinedVector Non-Member Functions
+// -----------------------------------------------------------------------------
+
+// `swap(...)`
+//
+// Swaps the contents of two inlined vectors.
+template <typename T, size_t N, typename A>
+void swap(absl::InlinedVector<T, N, A>& a,
+          absl::InlinedVector<T, N, A>& b) noexcept(noexcept(a.swap(b))) {
+  a.swap(b);
+}
+
+// `operator==(...)`
+//
+// Tests for value-equality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator==(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  auto a_data = a.data();
+  auto b_data = b.data();
+  return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+}
+
+// `operator!=(...)`
+//
+// Tests for value-inequality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator!=(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  return !(a == b);
+}
+
+// `operator<(...)`
+//
+// Tests whether the value of an inlined vector is less than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator<(const absl::InlinedVector<T, N, A>& a,
+               const absl::InlinedVector<T, N, A>& b) {
+  auto a_data = a.data();
+  auto b_data = b.data();
+  return std::lexicographical_compare(a_data, a_data + a.size(), b_data,
+                                      b_data + b.size());
+}
+
+// `operator>(...)`
+//
+// Tests whether the value of an inlined vector is greater than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator>(const absl::InlinedVector<T, N, A>& a,
+               const absl::InlinedVector<T, N, A>& b) {
+  return b < a;
+}
+
+// `operator<=(...)`
+//
+// Tests whether the value of an inlined vector is less than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
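+// (For example, {1, 2} compares less than both {1, 3} and {1, 2, 0}, exactly
+// as with `std::vector`.)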
+template <typename T, size_t N, typename A>
+bool operator<=(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  return !(b < a);
+}
+
+// `operator>=(...)`
+//
+// Tests whether the value of an inlined vector is greater than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator>=(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  return !(a < b);
+}
+
+// `AbslHashValue(...)`
+//
+// Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to
+// call this directly.
+template <typename H, typename T, size_t N, typename A>
+H AbslHashValue(H h, const absl::InlinedVector<T, N, A>& a) {
+  auto size = a.size();
+  return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size);
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INLINED_VECTOR_H_
diff --git a/third_party/absl/absl/container/inlined_vector_benchmark.cc b/third_party/absl/absl/container/inlined_vector_benchmark.cc
new file mode 100644
index 000000000..5a04277c6
--- /dev/null
+++ b/third_party/absl/absl/container/inlined_vector_benchmark.cc
@@ -0,0 +1,829 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+#include <string>
+#include <vector>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/str_cat.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+void BM_InlinedVectorFill(benchmark::State& state) {
+  const int len = state.range(0);
+  absl::InlinedVector<int, 8> v;
+  v.reserve(len);
+  for (auto _ : state) {
+    v.resize(0);  // Use resize(0) as InlinedVector releases storage on clear().
+    for (int i = 0; i < len; ++i) {
+      v.push_back(i);
+    }
+    benchmark::DoNotOptimize(v);
+  }
+}
+BENCHMARK(BM_InlinedVectorFill)->Range(1, 256);
+
+void BM_InlinedVectorFillRange(benchmark::State& state) {
+  const int len = state.range(0);
+  const std::vector<int> src(len, len);
+  absl::InlinedVector<int, 8> v;
+  v.reserve(len);
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(src);
+    v.assign(src.begin(), src.end());
+    benchmark::DoNotOptimize(v);
+  }
+}
+BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256);
+
+void BM_StdVectorFill(benchmark::State& state) {
+  const int len = state.range(0);
+  std::vector<int> v;
+  v.reserve(len);
+  for (auto _ : state) {
+    v.clear();
+    for (int i = 0; i < len; ++i) {
+      v.push_back(i);
+    }
+    benchmark::DoNotOptimize(v);
+  }
+}
+BENCHMARK(BM_StdVectorFill)->Range(1, 256);
+
+// The purpose of the next two benchmarks is to verify that
+// absl::InlinedVector is efficient when moving is more efficient than
+// copying. To do so, we use strings that are larger than the short
+// string optimization.
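+// (A string stored inline cannot hand its buffer to the moved-to object, so
+// its data() pointer changes after a move; a heap-backed string transfers its
+// buffer and keeps the same pointer. The helper below exploits this.)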
+bool StringRepresentedInline(std::string s) {
+  const char* chars = s.data();
+  std::string s1 = std::move(s);
+  return s1.data() != chars;
+}
+
+int GetNonShortStringOptimizationSize() {
+  for (int i = 24; i <= 192; i *= 2) {
+    if (!StringRepresentedInline(std::string(i, 'A'))) {
+      return i;
+    }
+  }
+  ABSL_RAW_LOG(
+      FATAL,
+      "Failed to find a string larger than the short string optimization");
+  return -1;
+}
+
+void BM_InlinedVectorFillString(benchmark::State& state) {
+  const int len = state.range(0);
+  const int no_sso = GetNonShortStringOptimizationSize();
+  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
+                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+
+  for (auto _ : state) {
+    absl::InlinedVector<std::string, 8> v;
+    for (int i = 0; i < len; i++) {
+      v.push_back(strings[i & 3]);
+    }
+  }
+  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
+}
+BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024);
+
+void BM_StdVectorFillString(benchmark::State& state) {
+  const int len = state.range(0);
+  const int no_sso = GetNonShortStringOptimizationSize();
+  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
+                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+
+  for (auto _ : state) {
+    std::vector<std::string> v;
+    for (int i = 0; i < len; i++) {
+      v.push_back(strings[i & 3]);
+    }
+  }
+  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
+}
+BENCHMARK(BM_StdVectorFillString)->Range(0, 1024);
+
+struct Buffer {  // some arbitrary structure for benchmarking.
+  char* base;
+  int length;
+  int capacity;
+  void* user_data;
+};
+
+void BM_InlinedVectorAssignments(benchmark::State& state) {
+  const int len = state.range(0);
+  using BufferVec = absl::InlinedVector<Buffer, 2>;
+
+  BufferVec src;
+  src.resize(len);
+
+  BufferVec dst;
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(dst);
+    benchmark::DoNotOptimize(src);
+    dst = src;
+  }
+}
+BENCHMARK(BM_InlinedVectorAssignments)
+    ->Arg(0)
+    ->Arg(1)
+    ->Arg(2)
+    ->Arg(3)
+    ->Arg(4)
+    ->Arg(20);
+
+void BM_CreateFromContainer(benchmark::State& state) {
+  for (auto _ : state) {
+    absl::InlinedVector<int, 4> src{1, 2, 3};
+    benchmark::DoNotOptimize(src);
+    absl::InlinedVector<int, 4> dst(std::move(src));
+    benchmark::DoNotOptimize(dst);
+  }
+}
+BENCHMARK(BM_CreateFromContainer);
+
+struct LargeCopyableOnly {
+  LargeCopyableOnly() : d(1024, 17) {}
+  LargeCopyableOnly(const LargeCopyableOnly& o) = default;
+  LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default;
+
+  std::vector<int> d;
+};
+
+struct LargeCopyableSwappable {
+  LargeCopyableSwappable() : d(1024, 17) {}
+
+  LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;
+
+  LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
+    using std::swap;
+    swap(*this, o);
+    return *this;
+  }
+
+  friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
+    using std::swap;
+    swap(a.d, b.d);
+  }
+
+  std::vector<int> d;
+};
+
+struct LargeCopyableMovable {
+  LargeCopyableMovable() : d(1024, 17) {}
+  // Use implicitly defined copy and move.
+
+  std::vector<int> d;
+};
+
+struct LargeCopyableMovableSwappable {
+  LargeCopyableMovableSwappable() : d(1024, 17) {}
+  LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) =
+      default;
+  LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default;
+
+  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) {
+    using std::swap;
+    swap(*this, o);
+    return *this;
+  }
+  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) =
+      default;
+
+  friend void swap(LargeCopyableMovableSwappable& a,
+                   LargeCopyableMovableSwappable& b) {
+    using std::swap;
+    swap(a.d, b.d);
+  }
+
+  std::vector<int> d;
+};
+
+template <typename T>
+void BM_SwapElements(benchmark::State& state) {
+  const int len = state.range(0);
+  using Vec = absl::InlinedVector<T, 32>;
+  Vec a(len);
+  Vec b;
+  for (auto _ : state) {
+    using std::swap;
+    benchmark::DoNotOptimize(a);
+    benchmark::DoNotOptimize(b);
+    swap(a, b);
+  }
+}
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024);
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024);
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024);
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable)
+    ->Range(0, 1024);
+
+// The following benchmark is meant to track the efficiency of the vector size
+// as a function of stored type via the benchmark label. It is not meant to
+// output useful sizeof operator performance. The loop is a dummy operation
+// to fulfill the requirement of running the benchmark.
+template <typename VecType>
+void BM_Sizeof(benchmark::State& state) {
+  int size = 0;
+  for (auto _ : state) {
+    VecType vec;
+    size = sizeof(vec);
+  }
+  state.SetLabel(absl::StrCat("sz=", size));
+}
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 8>);
+
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 8>);
+
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 8>);
+
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
+
+void BM_InlinedVectorIndexInlined(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
+  }
+}
+BENCHMARK(BM_InlinedVectorIndexInlined);
+
+void BM_InlinedVectorIndexExternal(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
+  }
+}
+BENCHMARK(BM_InlinedVectorIndexExternal);
+
+void BM_StdVectorIndex(benchmark::State& state) {
+  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
+  }
+}
+BENCHMARK(BM_StdVectorIndex);
+
+void BM_InlinedVectorDataInlined(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
+  }
+}
+BENCHMARK(BM_InlinedVectorDataInlined);
+
+void BM_InlinedVectorDataExternal(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
+  }
+  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
+}
+BENCHMARK(BM_InlinedVectorDataExternal);
+
+void BM_StdVectorData(benchmark::State& state) {
+  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
+  }
+  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
+}
+BENCHMARK(BM_StdVectorData);
+
+void BM_InlinedVectorSizeInlined(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
+  }
+}
+BENCHMARK(BM_InlinedVectorSizeInlined);
+
+void BM_InlinedVectorSizeExternal(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
+  }
+}
+BENCHMARK(BM_InlinedVectorSizeExternal);
+
+void BM_StdVectorSize(benchmark::State& state) {
+  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
+  }
+}
+BENCHMARK(BM_StdVectorSize);
+
+void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
+  }
+}
+BENCHMARK(BM_InlinedVectorEmptyInlined);
+
+void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
+  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
+  }
+}
+BENCHMARK(BM_InlinedVectorEmptyExternal);
+
+void BM_StdVectorEmpty(benchmark::State& state) {
+  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
+  }
+}
+BENCHMARK(BM_StdVectorEmpty);
+
+constexpr size_t kInlinedCapacity = 4;
+constexpr size_t kLargeSize = kInlinedCapacity * 2;
+constexpr size_t kSmallSize = kInlinedCapacity / 2;
+constexpr size_t kBatchSize = 100;
+
+#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
+
+#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T)      \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)
+
+template <typename T>
+using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
+
+struct TrivialType {
+  size_t val;
+};
+
+class NontrivialType {
+ public:
+  ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
+    benchmark::DoNotOptimize(*this);
+  }
+
+  ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
+      : val_(other.val_) {
+    benchmark::DoNotOptimize(*this);
+  }
+
+  ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
+      const NontrivialType& other) {
+    val_ = other.val_;
+    benchmark::DoNotOptimize(*this);
+    return *this;
+  }
+
+  ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
+    benchmark::DoNotOptimize(*this);
+  }
+
+ private:
+  size_t val_;
+};
+
+template <typename T, typename PrepareVecFn, typename TestVecFn>
+void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
+                      TestVecFn test_vec) {
+  std::array<InlVec<T>, kBatchSize> vector_batch{};
+
+  while (state.KeepRunningBatch(kBatchSize)) {
+    // Prepare batch
+    state.PauseTiming();
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      prepare_vec(vector_batch.data() + i, i);
+    }
+    benchmark::DoNotOptimize(vector_batch);
+    state.ResumeTiming();
+
+    // Test batch
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      test_vec(vector_batch.data() + i, i);
+    }
+  }
+}
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSize(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        ::new (ptr) VecT(size);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSizeRef(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  auto ref = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        benchmark::DoNotOptimize(ref);
+        ::new (ptr) VecT(size, ref);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromRange(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<T, ToSize> arr{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(arr);
+        ::new (ptr) VecT(arr.begin(), arr.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromCopy(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  VecT other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        ::new (ptr) VecT(other_vec);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromMove(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->~VecT();
+      },
+      /* test_vec = */
+      [&](void* ptr, size_t i) {
+        benchmark::DoNotOptimize(vector_batch[i]);
+        ::new (ptr) VecT(std::move(vector_batch[i]));
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
+
+// Measure cost of copy-constructor+destructor.
+void BM_CopyTrivial(benchmark::State& state) {
+  const int n = state.range(0);
+  InlVec<int64_t> src(n);
+  for (auto s : state) {
+    InlVec<int64_t> copy(src);
+    benchmark::DoNotOptimize(copy);
+  }
+}
+BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
+
+// Measure cost of copy-constructor+destructor.
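+// (Non-trivial variant: each element is itself a vector, so every copy must
+// allocate; compare against BM_CopyTrivial above.)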
+void BM_CopyNonTrivial(benchmark::State& state) {
+  const int n = state.range(0);
+  InlVec<InlVec<int64_t>> src(n);
+  for (auto s : state) {
+    InlVec<InlVec<int64_t>> copy(src);
+    benchmark::DoNotOptimize(copy);
+  }
+}
+BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignSizeRef(benchmark::State& state) {
+  auto size = ToSize;
+  auto ref = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(size);
+        benchmark::DoNotOptimize(ref);
+        vec->assign(size, ref);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignRange(benchmark::State& state) {
+  std::array<T, ToSize> arr{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(arr);
+        vec->assign(arr.begin(), arr.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromCopy(benchmark::State& state) {
+  InlVec<T> other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        *vec = other_vec;
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromMove(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        benchmark::DoNotOptimize(vector_batch[i]);
+        *vec = std::move(vector_batch[i]);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSize(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        vec->resize(ToSize, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertRange(benchmark::State& state) {
+  InlVec<T> other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, other_vec.begin(), other_vec.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EmplaceBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_PopBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->pop_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseOne(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseRange(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos, pos + 1);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_Clear(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToCapacity>
+void BM_Reserve(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);
+
+template <typename T, size_t FromCapacity, size_t ToCapacity>
+void BM_ShrinkToFit(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(ToCapacity);
+        vec->reserve(FromCapacity);
+      },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_Swap(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        using std::swap;
+        benchmark::DoNotOptimize(vector_batch[i]);
+        swap(*vec, vector_batch[i]);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);
+
+}  // namespace
diff --git a/third_party/absl/absl/container/inlined_vector_exception_safety_test.cc b/third_party/absl/absl/container/inlined_vector_exception_safety_test.cc
new file mode 100644
index 000000000..0e6a05b5f
--- /dev/null
+++ b/third_party/absl/absl/container/inlined_vector_exception_safety_test.cc
@@ -0,0 +1,508 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/inlined_vector.h"
+
+#include "absl/base/config.h"
+
+#if defined(ABSL_HAVE_EXCEPTIONS)
+
+#include <array>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_safety_testing.h"
+
+namespace {
+
+constexpr size_t kInlinedCapacity = 4;
+constexpr size_t kLargeSize = kInlinedCapacity * 2;
+constexpr size_t kSmallSize = kInlinedCapacity / 2;
+
+using Thrower = testing::ThrowingValue<>;
+using MovableThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;
+using ThrowAlloc = testing::ThrowingAllocator<Thrower>;
+
+using ThrowerVec = absl::InlinedVector<Thrower, kInlinedCapacity>;
+using MovableThrowerVec = absl::InlinedVector<MovableThrower, kInlinedCapacity>;
+
+using ThrowAllocThrowerVec =
+    absl::InlinedVector<Thrower, kInlinedCapacity, ThrowAlloc>;
+using ThrowAllocMovableThrowerVec =
+    absl::InlinedVector<MovableThrower, kInlinedCapacity, ThrowAlloc>;
+
+// In GCC, if an element of a `std::initializer_list` throws during construction
+// the elements that were constructed before it are not destroyed. This causes
+// incorrect exception safety test failures. Thus, `testing::nothrow_ctor` is
+// required. See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66139
+#define ABSL_INTERNAL_MAKE_INIT_LIST(T, N)                      \
+  (N > kInlinedCapacity                                         \
+       ? std::initializer_list<T>{T(0, testing::nothrow_ctor),  \
+                                  T(1, testing::nothrow_ctor),  \
+                                  T(2, testing::nothrow_ctor),  \
+                                  T(3, testing::nothrow_ctor),  \
+                                  T(4, testing::nothrow_ctor),  \
+                                  T(5, testing::nothrow_ctor),  \
+                                  T(6, testing::nothrow_ctor),  \
+                                  T(7, testing::nothrow_ctor)}  \
+                                                                \
+       : std::initializer_list<T>{T(0, testing::nothrow_ctor),  \
+                                  T(1, testing::nothrow_ctor)})
+static_assert(kLargeSize == 8, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)");
+static_assert(kSmallSize == 2, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)");
+
+template <typename TheVecT, size_t... TheSizes>
+class TestParams {
+ public:
+  using VecT = TheVecT;
+  constexpr static size_t GetSizeAt(size_t i) { return kSizes[1 + i]; }
+
+ private:
+  constexpr static size_t kSizes[1 + sizeof...(TheSizes)] = {1, TheSizes...};
+};
+
+using NoSizeTestParams =
+    ::testing::Types<TestParams<ThrowerVec>, TestParams<MovableThrowerVec>,
+                     TestParams<ThrowAllocThrowerVec>,
+                     TestParams<ThrowAllocMovableThrowerVec>>;
+
+using OneSizeTestParams =
+    ::testing::Types<TestParams<ThrowerVec, kLargeSize>,
+                     TestParams<ThrowerVec, kSmallSize>,
+                     TestParams<MovableThrowerVec, kLargeSize>,
+                     TestParams<MovableThrowerVec, kSmallSize>,
+                     TestParams<ThrowAllocThrowerVec, kLargeSize>,
+                     TestParams<ThrowAllocThrowerVec, kSmallSize>,
+                     TestParams<ThrowAllocMovableThrowerVec, kLargeSize>,
+                     TestParams<ThrowAllocMovableThrowerVec, kSmallSize>>;
+
+using TwoSizeTestParams = ::testing::Types<
+    TestParams<ThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<MovableThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<MovableThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<MovableThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<MovableThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<ThrowAllocThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowAllocThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowAllocThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowAllocThrowerVec, kSmallSize, kSmallSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kLargeSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kSmallSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kLargeSize>,
+    TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kSmallSize>>;
+
+template <typename>
+struct NoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(NoSizeTest, NoSizeTestParams);
+
+template <typename>
+struct OneSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(OneSizeTest, OneSizeTestParams);
+
+template <typename>
+struct TwoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(TwoSizeTest, TwoSizeTestParams);
+
+template <typename VecT>
+bool InlinedVectorInvariants(VecT* vec) {
+  if (*vec != *vec) return false;
+  if (vec->size() > vec->capacity()) return false;
+  if (vec->size() > vec->max_size()) return false;
+  if (vec->capacity() > vec->max_size()) return false;
+  if (vec->data() != std::addressof(vec->at(0))) return false;
+  if (vec->data() != vec->begin()) return false;
+  if (*vec->data() != *vec->begin()) return false;
+  if (vec->begin() > vec->end()) return false;
+  if ((vec->end() - vec->begin()) != vec->size()) return false;
+  if (std::distance(vec->begin(), vec->end()) != vec->size()) return false;
+  return true;
+}
+
+// Function that always returns false is correct, but refactoring is required
+// for clarity. It's needed to express that, as a contract, certain operations
+// should not throw at all. Execution of this function means an exception was
+// thrown and thus the test should fail.
+// TODO(johnsoncj): Add `testing::NoThrowGuarantee` to the framework
+template <typename VecT>
+bool NoThrowGuarantee(VecT* /* vec */) {
+  return false;
+}
+
+TYPED_TEST(NoSizeTest, DefaultConstructor) {
+  using VecT = typename TypeParam::VecT;
+  using allocator_type = typename VecT::allocator_type;
+
+  testing::TestThrowingCtor<VecT>();
+
+  testing::TestThrowingCtor<VecT>(allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, SizeConstructor) {
+  using VecT = typename TypeParam::VecT;
+  using allocator_type = typename VecT::allocator_type;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  testing::TestThrowingCtor<VecT>(size);
+
+  testing::TestThrowingCtor<VecT>(size, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, SizeRefConstructor) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  using allocator_type = typename VecT::allocator_type;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  testing::TestThrowingCtor<VecT>(size, value_type{});
+
+  testing::TestThrowingCtor<VecT>(size, value_type{}, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, InitializerListConstructor) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  using allocator_type = typename VecT::allocator_type;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  testing::TestThrowingCtor<VecT>(
+      ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size));
+
+  testing::TestThrowingCtor<VecT>(
+      ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size), allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, RangeConstructor) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  using allocator_type = typename VecT::allocator_type;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  std::array<value_type, size> arr{};
+
+  testing::TestThrowingCtor<VecT>(arr.begin(), arr.end());
+
+  testing::TestThrowingCtor<VecT>(arr.begin(), arr.end(), allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, CopyConstructor) {
+  using VecT = typename TypeParam::VecT;
+  using allocator_type = typename VecT::allocator_type;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  VecT other_vec{size};
+
+  testing::TestThrowingCtor<VecT>(other_vec);
+
+  testing::TestThrowingCtor<VecT>(other_vec, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, MoveConstructor) {
+  using VecT = typename TypeParam::VecT;
+  using allocator_type = typename VecT::allocator_type;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  if (!absl::allocator_is_nothrow<allocator_type>::value) {
+    testing::TestThrowingCtor<VecT>(VecT{size});
+
+    testing::TestThrowingCtor<VecT>(VecT{size}, allocator_type{});
+  }
+}
+
+TYPED_TEST(TwoSizeTest, Assign) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    *vec = ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    *vec = other_vec;
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    *vec = std::move(other_vec);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    value_type val{};
+    vec->assign(to_size, val);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->assign(ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size));
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    std::array<value_type, to_size> arr{};
+    vec->assign(arr.begin(), arr.end());
+  }));
+}
+
+TYPED_TEST(TwoSizeTest, Resize) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>,
+                                   testing::strong_guarantee);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->resize(to_size);  //
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->resize(to_size, value_type{});  //
+  }));
+}
+
+TYPED_TEST(OneSizeTest, Insert) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->insert(it, value_type{});
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->insert(it, value_type{});
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->end();
+    vec->insert(it, value_type{});
+  }));
+}
+
+TYPED_TEST(TwoSizeTest, Insert) {
+  using VecT = typename TypeParam::VecT;
+  using value_type = typename VecT::value_type;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto count = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->insert(it, count, value_type{});
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->insert(it, count, value_type{});
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->end();
+    vec->insert(it, count, value_type{});
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->end();
+    vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    std::array<value_type, count> arr{};
+    vec->insert(it, arr.begin(), arr.end());
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    std::array<value_type, count> arr{};
+    vec->insert(it, arr.begin(), arr.end());
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->end();
+    std::array<value_type, count> arr{};
+    vec->insert(it, arr.begin(), arr.end());
+  }));
+}
+
+TYPED_TEST(OneSizeTest, EmplaceBack) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  // For testing calls to `emplace_back(...)` that reallocate.
+  VecT full_vec{size};
+  full_vec.resize(full_vec.capacity());
+
+  // For testing calls to `emplace_back(...)` that don't reallocate.
+  VecT nonfull_vec{size};
+  nonfull_vec.reserve(size + 1);
+
+  auto tester = testing::MakeExceptionSafetyTester().WithContracts(
+      InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) {
+    vec->emplace_back();
+  }));
+
+  EXPECT_TRUE(tester.WithInitialValue(full_vec).Test(
+      [](VecT* vec) { vec->emplace_back(); }));
+}
+
+TYPED_TEST(OneSizeTest, PopBack) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(NoThrowGuarantee<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->pop_back();  //
+  }));
+}
+
+TYPED_TEST(OneSizeTest, Erase) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->erase(it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->erase(it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() - 1);
+    vec->erase(it);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->erase(it, it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->erase(it, it);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() - 1);
+    vec->erase(it, it);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin();
+    vec->erase(it, it + 1);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() / 2);
+    vec->erase(it, it + 1);
+  }));
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    auto it = vec->begin() + (vec->size() - 1);
+    vec->erase(it, it + 1);
+  }));
+}
+
+TYPED_TEST(OneSizeTest, Clear) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(NoThrowGuarantee<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->clear();  //
+  }));
+}
+
+TYPED_TEST(TwoSizeTest, Reserve) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_capacity = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) { vec->reserve(to_capacity); }));
+}
+
+TYPED_TEST(OneSizeTest, ShrinkToFit) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto size = TypeParam::GetSizeAt(0);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    vec->shrink_to_fit();  //
+  }));
+}
+
+TYPED_TEST(TwoSizeTest, Swap) {
+  using VecT = typename TypeParam::VecT;
+  constexpr static auto from_size = TypeParam::GetSizeAt(0);
+  constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+  auto tester = testing::MakeExceptionSafetyTester()
+                    .WithInitialValue(VecT{from_size})
+                    .WithContracts(InlinedVectorInvariants<VecT>);
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    VecT other_vec{to_size};
+    vec->swap(other_vec);
+  }));
+
+  EXPECT_TRUE(tester.Test([](VecT* vec) {
+    using std::swap;
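+    // (The `using std::swap;` two-step lets argument-dependent lookup find a
+    // type-specific `swap` overload while falling back to `std::swap`.)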
+    VecT other_vec{to_size};
+    swap(*vec, other_vec);
+  }));
+}
+
+}  // namespace
+
+#endif  // defined(ABSL_HAVE_EXCEPTIONS)
diff --git a/third_party/absl/absl/container/inlined_vector_test.cc b/third_party/absl/absl/container/inlined_vector_test.cc
new file mode 100644
index 000000000..241389aef
--- /dev/null
+++ b/third_party/absl/absl/container/inlined_vector_test.cc
@@ -0,0 +1,2126 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/inlined_vector.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <forward_list>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <scoped_allocator>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/exception_testing.h"
+#include "absl/base/macros.h"
+#include "absl/base/options.h"
+#include "absl/container/internal/test_allocator.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/log/check.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+
+using absl::container_internal::CountingAllocator;
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::CopyableOnlyInstance;
+using absl::test_internal::InstanceTracker;
+using testing::AllOf;
+using testing::Each;
+using testing::ElementsAre;
+using testing::ElementsAreArray;
+using testing::Eq;
+using testing::Gt;
+using testing::Pointee;
+using testing::Pointwise;
+using testing::PrintToString;
+using testing::SizeIs;
+
+using IntVec = absl::InlinedVector<int, 8>;
+
+MATCHER_P(CapacityIs, n, "") {
+  return testing::ExplainMatchResult(n, arg.capacity(), result_listener);
+}
+
+MATCHER_P(ValueIs, e, "") {
+  return testing::ExplainMatchResult(e, arg.value(), result_listener);
+}
+
+// TODO(bsamwel): Add support for movable-only types.
+
+// Test fixture for typed tests on BaseCountedInstance derived classes, see
+// test_instance_tracker.h.
+template <typename T>
+class InstanceTest : public ::testing::Test {};
+TYPED_TEST_SUITE_P(InstanceTest);
+
+// A simple reference counted class to make sure that the proper elements are
+// destroyed in the erase(begin, end) test.
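+// (Every live RefCounted object, including copies, increments *count_ on
+// construction and decrements it on destruction, so a count of zero proves
+// that all copies of that element were destroyed.)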
+class RefCounted {
+ public:
+  RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }
+
+  RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
+    Ref();
+  }
+
+  ~RefCounted() {
+    Unref();
+    count_ = nullptr;
+  }
+
+  friend void swap(RefCounted& a, RefCounted& b) {
+    using std::swap;
+    swap(a.value_, b.value_);
+    swap(a.count_, b.count_);
+  }
+
+  RefCounted& operator=(RefCounted v) {
+    using std::swap;
+    swap(*this, v);
+    return *this;
+  }
+
+  void Ref() const {
+    CHECK_NE(count_, nullptr);
+    ++(*count_);
+  }
+
+  void Unref() const {
+    --(*count_);
+    CHECK_GE(*count_, 0);
+  }
+
+  int value_;
+  int* count_;
+};
+
+using RefCountedVec = absl::InlinedVector<RefCounted, 8>;
+
+// A class with a vtable pointer
+class Dynamic {
+ public:
+  virtual ~Dynamic() {}
+};
+
+using DynamicVec = absl::InlinedVector<Dynamic, 8>;
+
+// Append 0..len-1 to *v
+template <typename Container>
+static void Fill(Container* v, size_t len, int offset = 0) {
+  for (size_t i = 0; i < len; i++) {
+    v->push_back(static_cast<int>(i) + offset);
+  }
+}
+
+static IntVec Fill(size_t len, int offset = 0) {
+  IntVec v;
+  Fill(&v, len, offset);
+  return v;
+}
+
+TEST(IntVec, SimpleOps) {
+  for (size_t len = 0; len < 20; len++) {
+    IntVec v;
+    const IntVec& cv = v;  // const alias
+
+    Fill(&v, len);
+    EXPECT_EQ(len, v.size());
+    EXPECT_LE(len, v.capacity());
+
+    for (size_t i = 0; i < len; i++) {
+      EXPECT_EQ(static_cast<int>(i), v[i]);
+      EXPECT_EQ(static_cast<int>(i), v.at(i));
+    }
+    EXPECT_EQ(v.begin(), v.data());
+    EXPECT_EQ(cv.begin(), cv.data());
+
+    size_t counter = 0;
+    for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) {
+      EXPECT_EQ(static_cast<int>(counter), *iter);
+      counter++;
+    }
+    EXPECT_EQ(counter, len);
+
+    counter = 0;
+    for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) {
+      EXPECT_EQ(static_cast<int>(counter), *iter);
+      counter++;
+    }
+    EXPECT_EQ(counter, len);
+
+    counter = 0;
+    for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) {
+      EXPECT_EQ(static_cast<int>(counter), *iter);
+      counter++;
+    }
+    EXPECT_EQ(counter, len);
+
+    if (len > 0) {
+      EXPECT_EQ(0, v.front());
+      EXPECT_EQ(static_cast<int>(len - 1), v.back());
+      v.pop_back();
+      EXPECT_EQ(len - 1, v.size());
+      for (size_t i = 0; i < v.size(); ++i) {
+        EXPECT_EQ(static_cast<int>(i), v[i]);
+        EXPECT_EQ(static_cast<int>(i), v.at(i));
+      }
+    }
+  }
+}
+
+TEST(IntVec, PopBackNoOverflow) {
+  IntVec v = {1};
+  v.pop_back();
+  EXPECT_EQ(v.size(), 0u);
+}
+
+TEST(IntVec, AtThrows) {
+  IntVec v = {1, 2, 3};
+  EXPECT_EQ(v.at(2), 3);
+  ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range,
+                                 "failed bounds check");
+}
+
+TEST(IntVec, ReverseIterator) {
+  for (size_t len = 0; len < 20; len++) {
+    IntVec v;
+    Fill(&v, len);
+
+    size_t counter = len;
+    for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) {
+      counter--;
+      EXPECT_EQ(static_cast<int>(counter), *iter);
+    }
+    EXPECT_EQ(counter, 0u);
+
+    counter = len;
+    for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend();
+         ++iter) {
+      counter--;
+      EXPECT_EQ(static_cast<int>(counter), *iter);
+    }
+    EXPECT_EQ(counter, 0u);
+
+    counter = len;
+    for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend();
+         ++iter) {
+      counter--;
+      EXPECT_EQ(static_cast<int>(counter), *iter);
+    }
+    EXPECT_EQ(counter, 0u);
+  }
+}
+
+TEST(IntVec, Erase) {
+  for (size_t len = 1; len < 20; len++) {
+    for (size_t i = 0; i < len; ++i) {
+      IntVec v;
+      Fill(&v, len);
+      v.erase(v.begin() + i);
+      EXPECT_EQ(len - 1, v.size());
+      for (size_t j = 0; j < i; ++j) {
+        EXPECT_EQ(static_cast<int>(j), v[j]);
+      }
+      for (size_t j = i; j < len - 1; ++j) {
+        EXPECT_EQ(static_cast<int>(j + 1), v[j]);
+      }
+    }
+  }
+}
+
+TEST(IntVec, Hardened) {
+  IntVec v;
+  Fill(&v, 10);
+  EXPECT_EQ(v[9], 9);
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+  EXPECT_DEATH_IF_SUPPORTED(v[10], "");
+  EXPECT_DEATH_IF_SUPPORTED(v[static_cast<size_t>(-1)], "");
+  EXPECT_DEATH_IF_SUPPORTED(v.resize(v.max_size() + 1), "");
+#endif
+}
+
+// Move construction of a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+TEST(UniquePtr, MoveConstruct) {
+  for (size_t size = 0; size < 16; ++size) {
+    SCOPED_TRACE(size);
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+    for (size_t i = 0; i < size; ++i) {
+      a.push_back(std::make_unique<size_t>(i));
+    }
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> b(std::move(a));
+
+    ASSERT_THAT(b, SizeIs(size));
+    for (size_t i = 0; i < size; ++i) {
+      ASSERT_THAT(b[i], Pointee(i));
+    }
+  }
+}
+
+// Move assignment of a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+TEST(UniquePtr, MoveAssign) {
+  for (size_t size = 0; size < 16; ++size) {
+    SCOPED_TRACE(size);
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+    for (size_t i = 0; i < size; ++i) {
+      a.push_back(std::make_unique<size_t>(i));
+    }
+
+    absl::InlinedVector<std::unique_ptr<size_t>, 2> b;
+    b = std::move(a);
+
+    ASSERT_THAT(b, SizeIs(size));
+    for (size_t i = 0; i < size; ++i) {
+      ASSERT_THAT(b[i], Pointee(i));
+    }
+  }
+}
+
+// At the end of this test loop, the elements between [erase_begin, erase_end)
+// should have reference counts == 0, and all other elements should have
+// reference counts == 1.
+TEST(RefCountedVec, EraseBeginEnd) {
+  for (size_t len = 1; len < 20; ++len) {
+    for (size_t erase_begin = 0; erase_begin < len; ++erase_begin) {
+      for (size_t erase_end = erase_begin; erase_end <= len; ++erase_end) {
+        std::vector<int> counts(len, 0);
+        RefCountedVec v;
+        for (size_t i = 0; i < len; ++i) {
+          v.push_back(RefCounted(static_cast<int>(i), &counts[i]));
+        }
+
+        size_t erase_len = erase_end - erase_begin;
+
+        v.erase(v.begin() + erase_begin, v.begin() + erase_end);
+
+        EXPECT_EQ(len - erase_len, v.size());
+
+        // Check the elements before the first element erased.
+        for (size_t i = 0; i < erase_begin; ++i) {
+          EXPECT_EQ(static_cast<int>(i), v[i].value_);
+        }
+
+        // Check the elements after the first element erased.
+        for (size_t i = erase_begin; i < v.size(); ++i) {
+          EXPECT_EQ(static_cast<int>(i + erase_len), v[i].value_);
+        }
+
+        // Check that the elements at the beginning are preserved.
+        for (size_t i = 0; i < erase_begin; ++i) {
+          EXPECT_EQ(1, counts[i]);
+        }
+
+        // Check that the erased elements are destroyed
+        for (size_t i = erase_begin; i < erase_end; ++i) {
+          EXPECT_EQ(0, counts[i]);
+        }
+
+        // Check that the elements at the end are preserved.
+        for (size_t i = erase_end; i < len; ++i) {
+          EXPECT_EQ(1, counts[i]);
+        }
+      }
+    }
+  }
+}
+
+struct NoDefaultCtor {
+  explicit NoDefaultCtor(int) {}
+};
+struct NoCopy {
+  NoCopy() {}
+  NoCopy(const NoCopy&) = delete;
+};
+struct NoAssign {
+  NoAssign() {}
+  NoAssign& operator=(const NoAssign&) = delete;
+};
+struct MoveOnly {
+  MoveOnly() {}
+  MoveOnly(MoveOnly&&) = default;
+  MoveOnly& operator=(MoveOnly&&) = default;
+};
+TEST(InlinedVectorTest, NoDefaultCtor) {
+  absl::InlinedVector<NoDefaultCtor, 1> v(10, NoDefaultCtor(2));
+  (void)v;
+}
+TEST(InlinedVectorTest, NoCopy) {
+  absl::InlinedVector<NoCopy, 1> v(10);
+  (void)v;
+}
+TEST(InlinedVectorTest, NoAssign) {
+  absl::InlinedVector<NoAssign, 1> v(10);
+  (void)v;
+}
+TEST(InlinedVectorTest, MoveOnly) {
+  absl::InlinedVector<MoveOnly, 2> v;
+  v.push_back(MoveOnly{});
+  v.push_back(MoveOnly{});
+  v.push_back(MoveOnly{});
+  v.erase(v.begin());
+  v.push_back(MoveOnly{});
+  v.erase(v.begin(), v.begin() + 1);
+  v.insert(v.begin(), MoveOnly{});
+  v.emplace(v.begin());
+  v.emplace(v.begin(), MoveOnly{});
+}
+TEST(InlinedVectorTest, Noexcept) {
+  EXPECT_TRUE(std::is_nothrow_move_constructible<IntVec>::value);
+  EXPECT_TRUE((std::is_nothrow_move_constructible<
+               absl::InlinedVector<MoveOnly, 2>>::value));
+
+  struct MoveCanThrow {
+    MoveCanThrow(MoveCanThrow&&) {}
+  };
+  EXPECT_EQ(absl::default_allocator_is_nothrow::value,
+            (std::is_nothrow_move_constructible<
+                absl::InlinedVector<MoveCanThrow, 2>>::value));
+}
+
+TEST(InlinedVectorTest, EmplaceBack) {
+  absl::InlinedVector<std::pair<std::string, int>, 1> v;
+
+  auto& inlined_element = v.emplace_back("answer", 42);
+  EXPECT_EQ(&inlined_element, &v[0]);
+  EXPECT_EQ(inlined_element.first, "answer");
+  EXPECT_EQ(inlined_element.second, 42);
+
+  auto& allocated_element = v.emplace_back("taxicab", 1729);
+  EXPECT_EQ(&allocated_element, &v[1]);
+  EXPECT_EQ(allocated_element.first, "taxicab");
+  EXPECT_EQ(allocated_element.second, 1729);
+}
+
+TEST(InlinedVectorTest, ShrinkToFitGrowingVector) {
+  absl::InlinedVector<std::pair<std::string, int>, 1> v;
+
+  v.shrink_to_fit();
+  EXPECT_EQ(v.capacity(), 1u);
+
+  v.emplace_back("answer", 42);
+  v.shrink_to_fit();
+  EXPECT_EQ(v.capacity(), 1u);
+
+  v.emplace_back("taxicab", 1729);
+  EXPECT_GE(v.capacity(), 2u);
+  v.shrink_to_fit();
+  EXPECT_EQ(v.capacity(), 2u);
+
+  v.reserve(100);
+  EXPECT_GE(v.capacity(), 100u);
+  v.shrink_to_fit();
+  EXPECT_EQ(v.capacity(), 2u);
+}
+
+TEST(InlinedVectorTest, ShrinkToFitEdgeCases) {
+  {
+    absl::InlinedVector<std::pair<std::string, int>, 1> v;
+    v.emplace_back("answer", 42);
+    v.emplace_back("taxicab", 1729);
+    EXPECT_GE(v.capacity(), 2u);
+    v.pop_back();
+    v.shrink_to_fit();
+    EXPECT_EQ(v.capacity(), 1u);
+    EXPECT_EQ(v[0].first, "answer");
+    EXPECT_EQ(v[0].second, 42);
+  }
+
+  {
+    absl::InlinedVector<int, 2> v(100);
+    v.resize(0);
+    v.shrink_to_fit();
+    EXPECT_EQ(v.capacity(), 2u);  // inlined capacity
+  }
+
+  {
+    absl::InlinedVector<int, 2> v(100);
+    v.resize(1);
+    v.shrink_to_fit();
+    EXPECT_EQ(v.capacity(), 2u);  // inlined capacity
+  }
+
+  {
+    absl::InlinedVector<int, 2> v(100);
+    v.resize(2);
+    v.shrink_to_fit();
+    EXPECT_EQ(v.capacity(), 2u);
+  }
+
+  {
+    absl::InlinedVector<int, 2> v(100);
+    v.resize(3);
+    v.shrink_to_fit();
+    EXPECT_EQ(v.capacity(), 3u);
+  }
+}
+
+TEST(IntVec, Insert) {
+  for (size_t len = 0; len < 20; len++) {
+    for (ptrdiff_t pos = 0; pos <= static_cast<ptrdiff_t>(len); pos++) {
+      {
+        // Single element
+        std::vector<int> std_v;
+        Fill(&std_v,
len); + IntVec v; + Fill(&v, len); + + IntVec::size_type n = 5; + std_v.insert(std_v.begin() + pos, n, 9999); + IntVec::iterator it = v.insert(v.cbegin() + pos, n, 9999); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (random access iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + const std::vector input = {9999, 8888, 7777}; + std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); + IntVec::iterator it = + v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (forward iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + const std::forward_list input = {9999, 8888, 7777}; + std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); + IntVec::iterator it = + v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Iterator range (input iterator) + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, {9999, 8888, 7777}); + std::istringstream input("9999 8888 7777"); + IntVec::iterator it = + v.insert(v.cbegin() + pos, std::istream_iterator(input), + std::istream_iterator()); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + { + // Initializer list + std::vector std_v; + Fill(&std_v, len); + IntVec v; + Fill(&v, len); + + std_v.insert(std_v.begin() + pos, {9999, 8888}); + IntVec::iterator it = v.insert(v.cbegin() + pos, {9999, 8888}); + EXPECT_THAT(v, ElementsAreArray(std_v)); + EXPECT_EQ(it, v.cbegin() + pos); + } + } + } +} + +TEST(RefCountedVec, InsertConstructorDestructor) { + // Make sure the proper construction/destruction happen during insert + // operations. + for (size_t len = 0; len < 20; len++) { + SCOPED_TRACE(len); + for (size_t pos = 0; pos <= len; pos++) { + SCOPED_TRACE(pos); + std::vector counts(len, 0); + int inserted_count = 0; + RefCountedVec v; + for (size_t i = 0; i < len; ++i) { + SCOPED_TRACE(i); + v.push_back(RefCounted(static_cast(i), &counts[i])); + } + + EXPECT_THAT(counts, Each(Eq(1))); + + RefCounted insert_element(9999, &inserted_count); + EXPECT_EQ(1, inserted_count); + v.insert(v.begin() + pos, insert_element); + EXPECT_EQ(2, inserted_count); + // Check that the elements at the end are preserved. 
+ EXPECT_THAT(counts, Each(Eq(1))); + EXPECT_EQ(2, inserted_count); + } + } +} + +TEST(IntVec, Resize) { + for (size_t len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + // Try resizing up and down by k elements + static const int kResizeElem = 1000000; + for (size_t k = 0; k < 10; k++) { + // Enlarging resize + v.resize(len + k, kResizeElem); + EXPECT_EQ(len + k, v.size()); + EXPECT_LE(len + k, v.capacity()); + for (size_t i = 0; i < len + k; i++) { + if (i < len) { + EXPECT_EQ(static_cast(i), v[i]); + } else { + EXPECT_EQ(kResizeElem, v[i]); + } + } + + // Shrinking resize + v.resize(len, kResizeElem); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + for (size_t i = 0; i < len; i++) { + EXPECT_EQ(static_cast(i), v[i]); + } + } + } +} + +TEST(IntVec, InitWithLength) { + for (size_t len = 0; len < 20; len++) { + IntVec v(len, 7); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + for (size_t i = 0; i < len; i++) { + EXPECT_EQ(7, v[i]); + } + } +} + +TEST(IntVec, CopyConstructorAndAssignment) { + for (size_t len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + EXPECT_EQ(len, v.size()); + EXPECT_LE(len, v.capacity()); + + IntVec v2(v); + EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2); + + for (size_t start_len = 0; start_len < 20; start_len++) { + IntVec v3; + Fill(&v3, start_len, 99); // Add dummy elements that should go away + v3 = v; + EXPECT_TRUE(v == v3) << PrintToString(v) << PrintToString(v3); + } + } +} + +TEST(IntVec, AliasingCopyAssignment) { + for (size_t len = 0; len < 20; ++len) { + IntVec original; + Fill(&original, len); + IntVec dup = original; + dup = *&dup; + EXPECT_EQ(dup, original); + } +} + +TEST(IntVec, MoveConstructorAndAssignment) { + for (size_t len = 0; len < 20; len++) { + IntVec v_in; + const size_t inlined_capacity = v_in.capacity(); + Fill(&v_in, len); + EXPECT_EQ(len, v_in.size()); + EXPECT_LE(len, v_in.capacity()); + + { + IntVec v_temp(v_in); + auto* old_data = v_temp.data(); + IntVec v_out(std::move(v_temp)); + EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); + if (v_in.size() > inlined_capacity) { + // Allocation is moved as a whole, data stays in place. + EXPECT_TRUE(v_out.data() == old_data); + } else { + EXPECT_FALSE(v_out.data() == old_data); + } + } + for (size_t start_len = 0; start_len < 20; start_len++) { + IntVec v_out; + Fill(&v_out, start_len, 99); // Add dummy elements that should go away + IntVec v_temp(v_in); + auto* old_data = v_temp.data(); + v_out = std::move(v_temp); + EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); + if (v_in.size() > inlined_capacity) { + // Allocation is moved as a whole, data stays in place. 
+ EXPECT_TRUE(v_out.data() == old_data); + } else { + EXPECT_FALSE(v_out.data() == old_data); + } + } + } +} + +class NotTriviallyDestructible { + public: + NotTriviallyDestructible() : p_(new int(1)) {} + explicit NotTriviallyDestructible(int i) : p_(new int(i)) {} + + NotTriviallyDestructible(const NotTriviallyDestructible& other) + : p_(new int(*other.p_)) {} + + NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) { + p_ = absl::make_unique(*other.p_); + return *this; + } + + bool operator==(const NotTriviallyDestructible& other) const { + return *p_ == *other.p_; + } + + private: + std::unique_ptr p_; +}; + +TEST(AliasingTest, Emplace) { + for (size_t i = 2; i < 20; ++i) { + absl::InlinedVector vec; + for (size_t j = 0; j < i; ++j) { + vec.push_back(NotTriviallyDestructible(static_cast(j))); + } + vec.emplace(vec.begin(), vec[0]); + EXPECT_EQ(vec[0], vec[1]); + vec.emplace(vec.begin() + i / 2, vec[i / 2]); + EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]); + vec.emplace(vec.end() - 1, vec.back()); + EXPECT_EQ(vec[vec.size() - 2], vec.back()); + } +} + +TEST(AliasingTest, InsertWithCount) { + for (size_t i = 1; i < 20; ++i) { + absl::InlinedVector vec; + for (size_t j = 0; j < i; ++j) { + vec.push_back(NotTriviallyDestructible(static_cast(j))); + } + for (size_t n = 0; n < 5; ++n) { + // We use back where we can because it's guaranteed to become invalidated + vec.insert(vec.begin(), n, vec.back()); + auto b = vec.begin(); + EXPECT_TRUE( + std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) { + return x == vec.back(); + })); + + auto m_idx = vec.size() / 2; + vec.insert(vec.begin() + m_idx, n, vec.back()); + auto m = vec.begin() + m_idx; + EXPECT_TRUE( + std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) { + return x == vec.back(); + })); + + // We want distinct values so the equality test is meaningful, + // vec[vec.size() - 1] is also almost always invalidated. + auto old_e = vec.size() - 1; + auto val = vec[old_e]; + vec.insert(vec.end(), n, vec[old_e]); + auto e = vec.begin() + old_e; + EXPECT_TRUE(std::all_of( + e, e + n, + [&val](const NotTriviallyDestructible& x) { return x == val; })); + } + } +} + +TEST(OverheadTest, Storage) { + // Check for size overhead. + // In particular, ensure that std::allocator doesn't cost anything to store. + // The union should be absorbing some of the allocation bookkeeping overhead + // in the larger vectors, leaving only the size_ field as overhead. 
+ + struct T { void* val; }; + size_t expected_overhead = sizeof(T); + + EXPECT_EQ((2 * expected_overhead), + sizeof(absl::InlinedVector) - sizeof(T[1])); + EXPECT_EQ(expected_overhead, + sizeof(absl::InlinedVector) - sizeof(T[2])); + EXPECT_EQ(expected_overhead, + sizeof(absl::InlinedVector) - sizeof(T[3])); + EXPECT_EQ(expected_overhead, + sizeof(absl::InlinedVector) - sizeof(T[4])); + EXPECT_EQ(expected_overhead, + sizeof(absl::InlinedVector) - sizeof(T[5])); + EXPECT_EQ(expected_overhead, + sizeof(absl::InlinedVector) - sizeof(T[6])); + EXPECT_EQ(expected_overhead, + sizeof(absl::InlinedVector) - sizeof(T[7])); + EXPECT_EQ(expected_overhead, + sizeof(absl::InlinedVector) - sizeof(T[8])); +} + +TEST(IntVec, Clear) { + for (size_t len = 0; len < 20; len++) { + SCOPED_TRACE(len); + IntVec v; + Fill(&v, len); + v.clear(); + EXPECT_EQ(0u, v.size()); + EXPECT_EQ(v.begin(), v.end()); + } +} + +TEST(IntVec, Reserve) { + for (size_t len = 0; len < 20; len++) { + IntVec v; + Fill(&v, len); + + for (size_t newlen = 0; newlen < 100; newlen++) { + const int* start_rep = v.data(); + v.reserve(newlen); + const int* final_rep = v.data(); + if (newlen <= len) { + EXPECT_EQ(start_rep, final_rep); + } + EXPECT_LE(newlen, v.capacity()); + + // Filling up to newlen should not change rep + while (v.size() < newlen) { + v.push_back(0); + } + EXPECT_EQ(final_rep, v.data()); + } + } +} + +TEST(StringVec, SelfRefPushBack) { + std::vector std_v; + absl::InlinedVector v; + const std::string s = "A quite long string to ensure heap."; + std_v.push_back(s); + v.push_back(s); + for (int i = 0; i < 20; ++i) { + EXPECT_THAT(v, ElementsAreArray(std_v)); + + v.push_back(v.back()); + std_v.push_back(std_v.back()); + } + EXPECT_THAT(v, ElementsAreArray(std_v)); +} + +TEST(StringVec, SelfRefPushBackWithMove) { + std::vector std_v; + absl::InlinedVector v; + const std::string s = "A quite long string to ensure heap."; + std_v.push_back(s); + v.push_back(s); + for (int i = 0; i < 20; ++i) { + EXPECT_EQ(v.back(), std_v.back()); + + v.push_back(std::move(v.back())); + std_v.push_back(std::move(std_v.back())); + } + EXPECT_EQ(v.back(), std_v.back()); +} + +TEST(StringVec, SelfMove) { + const std::string s = "A quite long string to ensure heap."; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + absl::InlinedVector v; + for (int i = 0; i < len; ++i) { + SCOPED_TRACE(i); + v.push_back(s); + } + // Indirection necessary to avoid compiler warning. + v = std::move(*(&v)); + // Ensure that the inlined vector is still in a valid state by copying it. + // We don't expect specific contents since a self-move results in an + // unspecified valid state. 
+ std::vector copy(v.begin(), v.end()); + } +} + +TEST(IntVec, Swap) { + for (size_t l1 = 0; l1 < 20; l1++) { + SCOPED_TRACE(l1); + for (size_t l2 = 0; l2 < 20; l2++) { + SCOPED_TRACE(l2); + IntVec a = Fill(l1, 0); + IntVec b = Fill(l2, 100); + { + using std::swap; + swap(a, b); + } + EXPECT_EQ(l1, b.size()); + EXPECT_EQ(l2, a.size()); + for (size_t i = 0; i < l1; i++) { + SCOPED_TRACE(i); + EXPECT_EQ(static_cast(i), b[i]); + } + for (size_t i = 0; i < l2; i++) { + SCOPED_TRACE(i); + EXPECT_EQ(100 + static_cast(i), a[i]); + } + } + } +} + +TYPED_TEST_P(InstanceTest, Swap) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + for (size_t l1 = 0; l1 < 20; l1++) { + SCOPED_TRACE(l1); + for (size_t l2 = 0; l2 < 20; l2++) { + SCOPED_TRACE(l2); + InstanceTracker tracker; + InstanceVec a, b; + const size_t inlined_capacity = a.capacity(); + auto min_len = std::min(l1, l2); + auto max_len = std::max(l1, l2); + for (size_t i = 0; i < l1; i++) + a.push_back(Instance(static_cast(i))); + for (size_t i = 0; i < l2; i++) + b.push_back(Instance(100 + static_cast(i))); + EXPECT_EQ(tracker.instances(), static_cast(l1 + l2)); + tracker.ResetCopiesMovesSwaps(); + { + using std::swap; + swap(a, b); + } + EXPECT_EQ(tracker.instances(), static_cast(l1 + l2)); + if (a.size() > inlined_capacity && b.size() > inlined_capacity) { + EXPECT_EQ(tracker.swaps(), 0); // Allocations are swapped. + EXPECT_EQ(tracker.moves(), 0); + } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) { + EXPECT_EQ(tracker.swaps(), static_cast(min_len)); + EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()), + static_cast(max_len - min_len)); + } else { + // One is allocated and the other isn't. The allocation is transferred + // without copying elements, and the inlined instances are copied/moved. + EXPECT_EQ(tracker.swaps(), 0); + EXPECT_EQ((tracker.moves() ? 
tracker.moves() : tracker.copies()), + static_cast(min_len)); + } + + EXPECT_EQ(l1, b.size()); + EXPECT_EQ(l2, a.size()); + for (size_t i = 0; i < l1; i++) { + EXPECT_EQ(static_cast(i), b[i].value()); + } + for (size_t i = 0; i < l2; i++) { + EXPECT_EQ(100 + static_cast(i), a[i].value()); + } + } + } +} + +TEST(IntVec, EqualAndNotEqual) { + IntVec a, b; + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + + a.push_back(3); + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + b.push_back(3); + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + + b.push_back(7); + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + a.push_back(6); + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + a.clear(); + b.clear(); + for (size_t i = 0; i < 100; i++) { + a.push_back(static_cast(i)); + b.push_back(static_cast(i)); + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + + b[i] = b[i] + 1; + EXPECT_FALSE(a == b); + EXPECT_TRUE(a != b); + + b[i] = b[i] - 1; // Back to before + EXPECT_TRUE(a == b); + EXPECT_FALSE(a != b); + } +} + +TEST(IntVec, RelationalOps) { + IntVec a, b; + EXPECT_FALSE(a < b); + EXPECT_FALSE(b < a); + EXPECT_FALSE(a > b); + EXPECT_FALSE(b > a); + EXPECT_TRUE(a <= b); + EXPECT_TRUE(b <= a); + EXPECT_TRUE(a >= b); + EXPECT_TRUE(b >= a); + b.push_back(3); + EXPECT_TRUE(a < b); + EXPECT_FALSE(b < a); + EXPECT_FALSE(a > b); + EXPECT_TRUE(b > a); + EXPECT_TRUE(a <= b); + EXPECT_FALSE(b <= a); + EXPECT_FALSE(a >= b); + EXPECT_TRUE(b >= a); +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (size_t len = 0; len < 20; len++) { + SCOPED_TRACE(len); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec v; + const size_t inlined_capacity = v.capacity(); + for (size_t i = 0; i < len; i++) { + v.push_back(Instance(static_cast(i))); + } + EXPECT_EQ(tracker.instances(), static_cast(len)); + EXPECT_GE(tracker.copies() + tracker.moves(), + static_cast(len)); // More due to reallocation. + tracker.ResetCopiesMovesSwaps(); + + // Enlarging resize() must construct some objects + tracker.ResetCopiesMovesSwaps(); + v.resize(len + 10, Instance(100)); + EXPECT_EQ(tracker.instances(), static_cast(len) + 10); + if (len <= inlined_capacity && len + 10 > inlined_capacity) { + EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + static_cast(len)); + } else { + // Only specify a minimum number of copies + moves. We don't want to + // depend on the reallocation policy here. + EXPECT_GE(tracker.copies() + tracker.moves(), + 10); // More due to reallocation. 
+ } + + // Shrinking resize() must destroy some objects + tracker.ResetCopiesMovesSwaps(); + v.resize(len, Instance(100)); + EXPECT_EQ(tracker.instances(), static_cast(len)); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + + // reserve() must not increase the number of initialized objects + SCOPED_TRACE("reserve"); + v.reserve(len + 1000); + EXPECT_EQ(tracker.instances(), static_cast(len)); + EXPECT_EQ(tracker.copies() + tracker.moves(), static_cast(len)); + + // pop_back() and erase() must destroy one object + if (len > 0) { + tracker.ResetCopiesMovesSwaps(); + v.pop_back(); + EXPECT_EQ(tracker.instances(), static_cast(len) - 1); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + + if (!v.empty()) { + tracker.ResetCopiesMovesSwaps(); + v.erase(v.begin()); + EXPECT_EQ(tracker.instances(), static_cast(len) - 2); + EXPECT_EQ(tracker.copies() + tracker.moves(), + static_cast(len) - 2); + } + } + + tracker.ResetCopiesMovesSwaps(); + int instances_before_empty_erase = tracker.instances(); + v.erase(v.begin(), v.begin()); + EXPECT_EQ(tracker.instances(), instances_before_empty_erase); + EXPECT_EQ(tracker.copies() + tracker.moves(), 0); + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnCopyConstruction) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec v; + for (int i = 0; i < len; i++) { + v.push_back(Instance(i)); + } + EXPECT_EQ(tracker.instances(), len); + EXPECT_GE(tracker.copies() + tracker.moves(), + len); // More due to reallocation. + tracker.ResetCopiesMovesSwaps(); + { // Copy constructor should create 'len' more instances. + InstanceVec v_copy(v); + EXPECT_EQ(tracker.instances(), len + len); + EXPECT_EQ(tracker.copies(), len); + EXPECT_EQ(tracker.moves(), 0); + } + EXPECT_EQ(tracker.instances(), len); + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec v; + const size_t inlined_capacity = v.capacity(); + for (int i = 0; i < len; i++) { + v.push_back(Instance(i)); + } + EXPECT_EQ(tracker.instances(), len); + EXPECT_GE(tracker.copies() + tracker.moves(), + len); // More due to reallocation. + tracker.ResetCopiesMovesSwaps(); + { + InstanceVec v_copy(std::move(v)); + if (static_cast(len) > inlined_capacity) { + // Allocation is moved as a whole. + EXPECT_EQ(tracker.instances(), len); + EXPECT_EQ(tracker.live_instances(), len); + // Tests an implementation detail, don't rely on this in your code. 
+ EXPECT_EQ(v.size(), 0u); // NOLINT misc-use-after-move + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + } else { + EXPECT_EQ(tracker.instances(), len + len); + if (Instance::supports_move()) { + EXPECT_EQ(tracker.live_instances(), len); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), len); + } else { + EXPECT_EQ(tracker.live_instances(), len + len); + EXPECT_EQ(tracker.copies(), len); + EXPECT_EQ(tracker.moves(), 0); + } + } + EXPECT_EQ(tracker.swaps(), 0); + } + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnAssignment) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + for (int longorshort = 0; longorshort <= 1; ++longorshort) { + SCOPED_TRACE(longorshort); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec longer, shorter; + for (int i = 0; i < len; i++) { + longer.push_back(Instance(i)); + shorter.push_back(Instance(i)); + } + longer.push_back(Instance(len)); + EXPECT_EQ(tracker.instances(), len + len + 1); + EXPECT_GE(tracker.copies() + tracker.moves(), + len + len + 1); // More due to reallocation. + + tracker.ResetCopiesMovesSwaps(); + if (longorshort) { + shorter = longer; + EXPECT_EQ(tracker.instances(), (len + 1) + (len + 1)); + EXPECT_GE(tracker.copies() + tracker.moves(), + len + 1); // More due to reallocation. + } else { + longer = shorter; + EXPECT_EQ(tracker.instances(), len + len); + EXPECT_EQ(tracker.copies() + tracker.moves(), len); + } + } + } +} + +TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) { + using Instance = TypeParam; + using InstanceVec = absl::InlinedVector; + InstanceTracker tracker; + for (int len = 0; len < 20; len++) { + SCOPED_TRACE(len); + for (int longorshort = 0; longorshort <= 1; ++longorshort) { + SCOPED_TRACE(longorshort); + tracker.ResetCopiesMovesSwaps(); + + InstanceVec longer, shorter; + const size_t inlined_capacity = longer.capacity(); + for (int i = 0; i < len; i++) { + longer.push_back(Instance(i)); + shorter.push_back(Instance(i)); + } + longer.push_back(Instance(len)); + EXPECT_EQ(tracker.instances(), len + len + 1); + EXPECT_GE(tracker.copies() + tracker.moves(), + len + len + 1); // More due to reallocation. + + tracker.ResetCopiesMovesSwaps(); + int src_len; + if (longorshort) { + src_len = len + 1; + shorter = std::move(longer); + } else { + src_len = len; + longer = std::move(shorter); + } + if (static_cast(src_len) > inlined_capacity) { + // Allocation moved as a whole. + EXPECT_EQ(tracker.instances(), src_len); + EXPECT_EQ(tracker.live_instances(), src_len); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 0); + } else { + // Elements are all copied. + EXPECT_EQ(tracker.instances(), src_len + src_len); + if (Instance::supports_move()) { + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), src_len); + EXPECT_EQ(tracker.live_instances(), src_len); + } else { + EXPECT_EQ(tracker.copies(), src_len); + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), src_len + src_len); + } + } + EXPECT_EQ(tracker.swaps(), 0); + } + } +} + +TEST(CountElemAssign, SimpleTypeWithInlineBacking) { + const size_t inlined_capacity = absl::InlinedVector().capacity(); + + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] 
+ std::vector original_contents(original_size, 12345); + + absl::InlinedVector v(original_contents.begin(), + original_contents.end()); + v.assign(2, 123); + EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(123, 123))); + if (original_size <= inlined_capacity) { + // If the original had inline backing, it should stay inline. + EXPECT_EQ(v.capacity(), inlined_capacity); + } + } +} + +TEST(CountElemAssign, SimpleTypeWithAllocation) { + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] + std::vector original_contents(original_size, 12345); + + absl::InlinedVector v(original_contents.begin(), + original_contents.end()); + v.assign(3, 123); + EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(123, 123, 123))); + EXPECT_LE(v.size(), v.capacity()); + } +} + +TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) { + using Instance = TypeParam; + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] + std::vector original_contents(original_size, Instance(12345)); + + absl::InlinedVector v(original_contents.begin(), + original_contents.end()); + v.assign(2, Instance(123)); + EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(ValueIs(123), ValueIs(123)))); + if (original_size <= 2) { + // If the original had inline backing, it should stay inline. + EXPECT_EQ(2u, v.capacity()); + } + } +} + +template +void InstanceCountElemAssignWithAllocationTest() { + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] + std::vector original_contents(original_size, Instance(12345)); + + absl::InlinedVector v(original_contents.begin(), + original_contents.end()); + v.assign(3, Instance(123)); + EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(ValueIs(123), ValueIs(123), + ValueIs(123)))); + EXPECT_LE(v.size(), v.capacity()); + } +} +TEST(CountElemAssign, WithAllocationCopyableInstance) { + InstanceCountElemAssignWithAllocationTest(); +} +TEST(CountElemAssign, WithAllocationCopyableMovableInstance) { + InstanceCountElemAssignWithAllocationTest(); +} + +TEST(RangedConstructor, SimpleType) { + std::vector source_v = {4, 5, 6}; + // First try to fit in inline backing + absl::InlinedVector v(source_v.begin(), source_v.end()); + EXPECT_EQ(3u, v.size()); + EXPECT_EQ(4u, + v.capacity()); // Indication that we're still on inlined storage + EXPECT_EQ(4, v[0]); + EXPECT_EQ(5, v[1]); + EXPECT_EQ(6, v[2]); + + // Now, force a re-allocate + absl::InlinedVector realloc_v(source_v.begin(), source_v.end()); + EXPECT_EQ(3u, realloc_v.size()); + EXPECT_LT(2u, realloc_v.capacity()); + EXPECT_EQ(4, realloc_v[0]); + EXPECT_EQ(5, realloc_v[1]); + EXPECT_EQ(6, realloc_v[2]); +} + +// Test for ranged constructors using Instance as the element type and +// SourceContainer as the source container type. +template +void InstanceRangedConstructorTestForContainer() { + InstanceTracker tracker; + SourceContainer source_v = {Instance(0), Instance(1)}; + tracker.ResetCopiesMovesSwaps(); + absl::InlinedVector v(source_v.begin(), + source_v.end()); + EXPECT_EQ(2u, v.size()); + EXPECT_LT(1u, v.capacity()); + EXPECT_EQ(0, v[0].value()); + EXPECT_EQ(1, v[1].value()); + EXPECT_EQ(tracker.copies(), 2); + EXPECT_EQ(tracker.moves(), 0); +} + +template +void InstanceRangedConstructorTestWithCapacity() { + // Test with const and non-const, random access and non-random-access sources. 
+ // TODO(bsamwel): Test with an input iterator source. + { + SCOPED_TRACE("std::list"); + InstanceRangedConstructorTestForContainer, + inlined_capacity>(); + { + SCOPED_TRACE("const std::list"); + InstanceRangedConstructorTestForContainer< + Instance, const std::list, inlined_capacity>(); + } + { + SCOPED_TRACE("std::vector"); + InstanceRangedConstructorTestForContainer, + inlined_capacity>(); + } + { + SCOPED_TRACE("const std::vector"); + InstanceRangedConstructorTestForContainer< + Instance, const std::vector, inlined_capacity>(); + } + } +} + +TYPED_TEST_P(InstanceTest, RangedConstructor) { + using Instance = TypeParam; + SCOPED_TRACE("capacity=1"); + InstanceRangedConstructorTestWithCapacity(); + SCOPED_TRACE("capacity=2"); + InstanceRangedConstructorTestWithCapacity(); +} + +TEST(RangedConstructor, ElementsAreConstructed) { + std::vector source_v = {"cat", "dog"}; + + // Force expansion and re-allocation of v. Ensures that when the vector is + // expanded that new elements are constructed. + absl::InlinedVector v(source_v.begin(), source_v.end()); + EXPECT_EQ("cat", v[0]); + EXPECT_EQ("dog", v[1]); +} + +TEST(RangedAssign, SimpleType) { + const size_t inlined_capacity = absl::InlinedVector().capacity(); + + // Test for all combinations of original sizes (empty and non-empty inline, + // and out of line) and target sizes. + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] + std::vector original_contents(original_size, 12345); + + for (size_t target_size = 0; target_size <= 5; ++target_size) { + SCOPED_TRACE(target_size); + + // New contents are [3, 4, ...] + std::vector new_contents; + for (size_t i = 0; i < target_size; ++i) { + new_contents.push_back(static_cast(i + 3)); + } + + absl::InlinedVector v(original_contents.begin(), + original_contents.end()); + v.assign(new_contents.begin(), new_contents.end()); + + EXPECT_EQ(new_contents.size(), v.size()); + EXPECT_LE(new_contents.size(), v.capacity()); + if (target_size <= inlined_capacity && + original_size <= inlined_capacity) { + // Storage should stay inline when target size is small. + EXPECT_EQ(v.capacity(), inlined_capacity); + } + EXPECT_THAT(v, ElementsAreArray(new_contents)); + } + } +} + +// Returns true if lhs and rhs have the same value. +template +static bool InstanceValuesEqual(const Instance& lhs, const Instance& rhs) { + return lhs.value() == rhs.value(); +} + +// Test for ranged assign() using Instance as the element type and +// SourceContainer as the source container type. +template +void InstanceRangedAssignTestForContainer() { + // Test for all combinations of original sizes (empty and non-empty inline, + // and out of line) and target sizes. + for (size_t original_size = 0; original_size <= 5; ++original_size) { + SCOPED_TRACE(original_size); + // Original contents are [12345, 12345, ...] + std::vector original_contents(original_size, Instance(12345)); + + for (size_t target_size = 0; target_size <= 5; ++target_size) { + SCOPED_TRACE(target_size); + + // New contents are [3, 4, ...] + // Generate data using a non-const container, because SourceContainer + // itself may be const. + // TODO(bsamwel): Test with an input iterator. 
+ std::vector new_contents_in; + for (size_t i = 0; i < target_size; ++i) { + new_contents_in.push_back(Instance(static_cast(i) + 3)); + } + SourceContainer new_contents(new_contents_in.begin(), + new_contents_in.end()); + + absl::InlinedVector v(original_contents.begin(), + original_contents.end()); + v.assign(new_contents.begin(), new_contents.end()); + + EXPECT_EQ(new_contents.size(), v.size()); + EXPECT_LE(new_contents.size(), v.capacity()); + if (target_size <= 3 && original_size <= 3) { + // Storage should stay inline when target size is small. + EXPECT_EQ(3u, v.capacity()); + } + EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(), + InstanceValuesEqual)); + } + } +} + +TYPED_TEST_P(InstanceTest, RangedAssign) { + using Instance = TypeParam; + // Test with const and non-const, random access and non-random-access sources. + // TODO(bsamwel): Test with an input iterator source. + SCOPED_TRACE("std::list"); + InstanceRangedAssignTestForContainer>(); + SCOPED_TRACE("const std::list"); + InstanceRangedAssignTestForContainer>(); + SCOPED_TRACE("std::vector"); + InstanceRangedAssignTestForContainer>(); + SCOPED_TRACE("const std::vector"); + InstanceRangedAssignTestForContainer>(); +} + +TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) { + EXPECT_THAT((absl::InlinedVector{4, 5, 6}), + AllOf(SizeIs(3u), CapacityIs(4u), ElementsAre(4, 5, 6))); +} + +TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) { + EXPECT_THAT((absl::InlinedVector{4, 5, 6}), + AllOf(SizeIs(3u), CapacityIs(Gt(2u)), ElementsAre(4, 5, 6))); +} + +TEST(InitializerListConstructor, DisparateTypesInList) { + EXPECT_THAT((absl::InlinedVector{-7, 8ULL}), ElementsAre(-7, 8)); + + EXPECT_THAT((absl::InlinedVector{"foo", std::string("bar")}), + ElementsAre("foo", "bar")); +} + +TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) { + const size_t inlined_capacity = + absl::InlinedVector().capacity(); + EXPECT_THAT( + (absl::InlinedVector{ + CopyableMovableInstance(0)}), + AllOf(SizeIs(1u), CapacityIs(inlined_capacity), ElementsAre(ValueIs(0)))); +} + +TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) { + EXPECT_THAT((absl::InlinedVector{ + CopyableMovableInstance(0), CopyableMovableInstance(1)}), + AllOf(SizeIs(2u), CapacityIs(Gt(1u)), + ElementsAre(ValueIs(0), ValueIs(1)))); +} + +TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) { + for (size_t original_size = 0; original_size <= 4; ++original_size) { + SCOPED_TRACE(original_size); + + absl::InlinedVector v1(original_size, 12345); + const size_t original_capacity_v1 = v1.capacity(); + v1.assign({3}); + EXPECT_THAT(v1, AllOf(SizeIs(1u), CapacityIs(original_capacity_v1), + ElementsAre(3))); + + absl::InlinedVector v2(original_size, 12345); + const size_t original_capacity_v2 = v2.capacity(); + v2 = {3}; + EXPECT_THAT(v2, AllOf(SizeIs(1u), CapacityIs(original_capacity_v2), + ElementsAre(3))); + } +} + +TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) { + for (size_t original_size = 0; original_size <= 4; ++original_size) { + SCOPED_TRACE(original_size); + absl::InlinedVector v1(original_size, 12345); + v1.assign({3, 4, 5}); + EXPECT_THAT(v1, AllOf(SizeIs(3u), ElementsAre(3, 4, 5))); + EXPECT_LE(3u, v1.capacity()); + + absl::InlinedVector v2(original_size, 12345); + v2 = {3, 4, 5}; + EXPECT_THAT(v2, AllOf(SizeIs(3u), ElementsAre(3, 4, 5))); + EXPECT_LE(3u, v2.capacity()); + } +} + +TEST(InitializerListAssign, DisparateTypesInList) { + absl::InlinedVector v_int1; + v_int1.assign({-7, 8ULL}); + 
EXPECT_THAT(v_int1, ElementsAre(-7, 8)); + + absl::InlinedVector v_int2; + v_int2 = {-7, 8ULL}; + EXPECT_THAT(v_int2, ElementsAre(-7, 8)); + + absl::InlinedVector v_string1; + v_string1.assign({"foo", std::string("bar")}); + EXPECT_THAT(v_string1, ElementsAre("foo", "bar")); + + absl::InlinedVector v_string2; + v_string2 = {"foo", std::string("bar")}; + EXPECT_THAT(v_string2, ElementsAre("foo", "bar")); +} + +TYPED_TEST_P(InstanceTest, InitializerListAssign) { + using Instance = TypeParam; + for (size_t original_size = 0; original_size <= 4; ++original_size) { + SCOPED_TRACE(original_size); + absl::InlinedVector v(original_size, Instance(12345)); + const size_t original_capacity = v.capacity(); + v.assign({Instance(3)}); + EXPECT_THAT(v, AllOf(SizeIs(1u), CapacityIs(original_capacity), + ElementsAre(ValueIs(3)))); + } + for (size_t original_size = 0; original_size <= 4; ++original_size) { + SCOPED_TRACE(original_size); + absl::InlinedVector v(original_size, Instance(12345)); + v.assign({Instance(3), Instance(4), Instance(5)}); + EXPECT_THAT( + v, AllOf(SizeIs(3u), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5)))); + EXPECT_LE(3u, v.capacity()); + } +} + +REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors, + CountConstructorsDestructorsOnCopyConstruction, + CountConstructorsDestructorsOnMoveConstruction, + CountConstructorsDestructorsOnAssignment, + CountConstructorsDestructorsOnMoveAssignment, + CountElemAssignInlineBacking, RangedConstructor, + RangedAssign, InitializerListAssign); + +using InstanceTypes = + ::testing::Types; +INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest, + InstanceTypes); + +TEST(DynamicVec, DynamicVecCompiles) { + DynamicVec v; + (void)v; +} + +TEST(DynamicVec, CreateNonEmptyDynamicVec) { + DynamicVec v(1); + EXPECT_EQ(v.size(), 1u); +} + +TEST(DynamicVec, EmplaceBack) { + DynamicVec v; + v.emplace_back(Dynamic{}); + EXPECT_EQ(v.size(), 1u); +} + +TEST(DynamicVec, EmplaceBackAfterHeapAllocation) { + DynamicVec v; + v.reserve(10); + v.emplace_back(Dynamic{}); + EXPECT_EQ(v.size(), 1u); +} + +TEST(DynamicVec, EmptyIteratorComparison) { + DynamicVec v; + EXPECT_EQ(v.begin(), v.end()); + EXPECT_EQ(v.cbegin(), v.cend()); +} + +TEST(AllocatorSupportTest, Constructors) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + int64_t allocated = 0; + MyAlloc alloc(&allocated); + { AllocVec ABSL_ATTRIBUTE_UNUSED v; } + { AllocVec ABSL_ATTRIBUTE_UNUSED v(alloc); } + { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); } + { AllocVec ABSL_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); } + + AllocVec v2; + { AllocVec ABSL_ATTRIBUTE_UNUSED v(v2, alloc); } + { AllocVec ABSL_ATTRIBUTE_UNUSED v(std::move(v2), alloc); } +} + +TEST(AllocatorSupportTest, CountAllocations) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; + int64_t allocated = 0; + MyAlloc alloc(&allocated); + { + AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc); + EXPECT_THAT(allocated, Eq(0)); + } + EXPECT_THAT(allocated, Eq(0)); + { + AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); + EXPECT_THAT(allocated, Eq(static_cast(v.size() * sizeof(int)))); + } + EXPECT_THAT(allocated, Eq(0)); + { + AllocVec v(4, 1, alloc); + EXPECT_THAT(allocated, Eq(0)); + + int64_t allocated2 = 0; + MyAlloc alloc2(&allocated2); + AllocVec v2(v, alloc2); + EXPECT_THAT(allocated2, Eq(0)); + + int64_t allocated3 = 0; + MyAlloc 
alloc3(&allocated3); + AllocVec v3(std::move(v), alloc3); + EXPECT_THAT(allocated3, Eq(0)); + } + EXPECT_THAT(allocated, 0); + { + AllocVec v(8, 2, alloc); + EXPECT_THAT(allocated, Eq(static_cast(v.size() * sizeof(int)))); + + int64_t allocated2 = 0; + MyAlloc alloc2(&allocated2); + AllocVec v2(v, alloc2); + EXPECT_THAT(allocated2, Eq(static_cast(v2.size() * sizeof(int)))); + + int64_t allocated3 = 0; + MyAlloc alloc3(&allocated3); + AllocVec v3(std::move(v), alloc3); + EXPECT_THAT(allocated3, Eq(static_cast(v3.size() * sizeof(int)))); + } + EXPECT_EQ(allocated, 0); + { + // Test shrink_to_fit deallocations. + AllocVec v(8, 2, alloc); + EXPECT_EQ(allocated, static_cast(8 * sizeof(int))); + v.resize(5); + EXPECT_EQ(allocated, static_cast(8 * sizeof(int))); + v.shrink_to_fit(); + EXPECT_EQ(allocated, static_cast(5 * sizeof(int))); + v.resize(4); + EXPECT_EQ(allocated, static_cast(5 * sizeof(int))); + v.shrink_to_fit(); + EXPECT_EQ(allocated, 0); + } +} + +TEST(AllocatorSupportTest, SwapBothAllocated) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + int64_t allocated1 = 0; + int64_t allocated2 = 0; + { + const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7}; + const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + MyAlloc a1(&allocated1); + MyAlloc a2(&allocated2); + AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); + AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); + EXPECT_LT(v1.capacity(), v2.capacity()); + EXPECT_THAT(allocated1, + Eq(static_cast(v1.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, + Eq(static_cast(v2.capacity() * sizeof(int)))); + v1.swap(v2); + EXPECT_THAT(v1, ElementsAreArray(ia2)); + EXPECT_THAT(v2, ElementsAreArray(ia1)); + EXPECT_THAT(allocated1, + Eq(static_cast(v2.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, + Eq(static_cast(v1.capacity() * sizeof(int)))); + } + EXPECT_THAT(allocated1, 0); + EXPECT_THAT(allocated2, 0); +} + +TEST(AllocatorSupportTest, SwapOneAllocated) { + using MyAlloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + int64_t allocated1 = 0; + int64_t allocated2 = 0; + { + const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7}; + const int ia2[] = {0, 1, 2, 3}; + MyAlloc a1(&allocated1); + MyAlloc a2(&allocated2); + AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); + AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); + EXPECT_THAT(allocated1, + Eq(static_cast(v1.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, Eq(0)); + v1.swap(v2); + EXPECT_THAT(v1, ElementsAreArray(ia2)); + EXPECT_THAT(v2, ElementsAreArray(ia1)); + EXPECT_THAT(allocated1, + Eq(static_cast(v2.capacity() * sizeof(int)))); + EXPECT_THAT(allocated2, Eq(0)); + EXPECT_TRUE(v2.get_allocator() == a1); + EXPECT_TRUE(v1.get_allocator() == a2); + } + EXPECT_THAT(allocated1, 0); + EXPECT_THAT(allocated2, 0); +} + +TEST(AllocatorSupportTest, ScopedAllocatorWorksInlined) { + using StdVector = std::vector>; + using Alloc = CountingAllocator; + using ScopedAlloc = std::scoped_allocator_adaptor; + using AllocVec = absl::InlinedVector; + + int64_t total_allocated_byte_count = 0; + + AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count))); + + // Called only once to remain inlined + inlined_case.emplace_back(); + + int64_t absl_responsible_for_count = total_allocated_byte_count; + + // MSVC's allocator preemptively allocates in debug mode +#if !defined(_MSC_VER) + EXPECT_EQ(absl_responsible_for_count, 0); +#endif // !defined(_MSC_VER) + + inlined_case[0].emplace_back(); + EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count); + + 
inlined_case.clear(); + inlined_case.shrink_to_fit(); + EXPECT_EQ(total_allocated_byte_count, 0); +} + +TEST(AllocatorSupportTest, ScopedAllocatorWorksAllocated) { + using StdVector = std::vector>; + using Alloc = CountingAllocator; + using ScopedAlloc = std::scoped_allocator_adaptor; + using AllocVec = absl::InlinedVector; + + int64_t total_allocated_byte_count = 0; + + AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count))); + + // Called twice to force into being allocated + allocated_case.emplace_back(); + allocated_case.emplace_back(); + + int64_t absl_responsible_for_count = total_allocated_byte_count; + EXPECT_GT(absl_responsible_for_count, 0); + + allocated_case[1].emplace_back(); + EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count); + + allocated_case.clear(); + allocated_case.shrink_to_fit(); + EXPECT_EQ(total_allocated_byte_count, 0); +} + +TEST(AllocatorSupportTest, SizeAllocConstructor) { + constexpr size_t inlined_size = 4; + using Alloc = CountingAllocator; + using AllocVec = absl::InlinedVector; + + { + auto len = inlined_size / 2; + int64_t allocated = 0; + auto v = AllocVec(len, Alloc(&allocated)); + + // Inline storage used; allocator should not be invoked + EXPECT_THAT(allocated, Eq(0)); + EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); + } + + { + auto len = inlined_size * 2; + int64_t allocated = 0; + auto v = AllocVec(len, Alloc(&allocated)); + + // Out of line storage used; allocation of 8 elements expected + EXPECT_THAT(allocated, Eq(static_cast(len * sizeof(int)))); + EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); + } +} + +TEST(InlinedVectorTest, MinimumAllocatorCompilesUsingTraits) { + using T = int; + using A = std::allocator; + using ATraits = absl::allocator_traits; + + struct MinimumAllocator { + using value_type = T; + + value_type* allocate(size_t n) { + A a; + return ATraits::allocate(a, n); + } + + void deallocate(value_type* p, size_t n) { + A a; + ATraits::deallocate(a, p, n); + } + }; + + absl::InlinedVector vec; + vec.emplace_back(); + vec.resize(0); +} + +TEST(InlinedVectorTest, AbslHashValueWorks) { + using V = absl::InlinedVector; + std::vector cases; + + // Generate a variety of vectors some of these are small enough for the inline + // space but are stored out of line. 
+ for (size_t i = 0; i < 10; ++i) { + V v; + for (int j = 0; j < static_cast(i); ++j) { + v.push_back(j); + } + cases.push_back(v); + v.resize(i % 4); + cases.push_back(v); + } + + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); +} + +class MoveConstructibleOnlyInstance + : public absl::test_internal::BaseCountedInstance { + public: + explicit MoveConstructibleOnlyInstance(int x) : BaseCountedInstance(x) {} + MoveConstructibleOnlyInstance(MoveConstructibleOnlyInstance&& other) = + default; + MoveConstructibleOnlyInstance& operator=( + MoveConstructibleOnlyInstance&& other) = delete; +}; + +MATCHER(HasValue, "") { + return ::testing::get<0>(arg).value() == ::testing::get<1>(arg); +} + +TEST(NonAssignableMoveAssignmentTest, AllocatedToInline) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector inlined; + inlined.emplace_back(1); + absl::InlinedVector allocated; + allocated.emplace_back(1); + allocated.emplace_back(2); + allocated.emplace_back(3); + tracker.ResetCopiesMovesSwaps(); + + inlined = std::move(allocated); + // passed ownership of the allocated storage + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 3); + + EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3})); +} + +TEST(NonAssignableMoveAssignmentTest, InlineToAllocated) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector inlined; + inlined.emplace_back(1); + absl::InlinedVector allocated; + allocated.emplace_back(1); + allocated.emplace_back(2); + allocated.emplace_back(3); + tracker.ResetCopiesMovesSwaps(); + + allocated = std::move(inlined); + // Moved elements + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(tracker.live_instances(), 1); + + EXPECT_THAT(allocated, Pointwise(HasValue(), {1})); +} + +TEST(NonAssignableMoveAssignmentTest, InlineToInline) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector inlined_a; + inlined_a.emplace_back(1); + absl::InlinedVector inlined_b; + inlined_b.emplace_back(1); + tracker.ResetCopiesMovesSwaps(); + + inlined_a = std::move(inlined_b); + // Moved elements + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(tracker.live_instances(), 1); + + EXPECT_THAT(inlined_a, Pointwise(HasValue(), {1})); +} + +TEST(NonAssignableMoveAssignmentTest, AllocatedToAllocated) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector allocated_a; + allocated_a.emplace_back(1); + allocated_a.emplace_back(2); + allocated_a.emplace_back(3); + absl::InlinedVector allocated_b; + allocated_b.emplace_back(4); + allocated_b.emplace_back(5); + allocated_b.emplace_back(6); + allocated_b.emplace_back(7); + tracker.ResetCopiesMovesSwaps(); + + allocated_a = std::move(allocated_b); + // passed ownership of the allocated storage + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 4); + + EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7})); +} + +TEST(NonAssignableMoveAssignmentTest, AssignThis) { + using X = MoveConstructibleOnlyInstance; + InstanceTracker tracker; + absl::InlinedVector v; + v.emplace_back(1); + v.emplace_back(2); + v.emplace_back(3); + + tracker.ResetCopiesMovesSwaps(); + + // Obfuscated in order to pass -Wself-move. 
+ v = std::move(*std::addressof(v)); + // nothing happens + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 3); + + EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3})); +} + +class NonSwappableInstance : public absl::test_internal::BaseCountedInstance { + public: + explicit NonSwappableInstance(int x) : BaseCountedInstance(x) {} + NonSwappableInstance(const NonSwappableInstance& other) = default; + NonSwappableInstance& operator=(const NonSwappableInstance& other) = default; + NonSwappableInstance(NonSwappableInstance&& other) = default; + NonSwappableInstance& operator=(NonSwappableInstance&& other) = default; +}; + +void swap(NonSwappableInstance&, NonSwappableInstance&) = delete; + +TEST(NonSwappableSwapTest, InlineAndAllocatedTransferStorageAndMove) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector inlined; + inlined.emplace_back(1); + absl::InlinedVector allocated; + allocated.emplace_back(1); + allocated.emplace_back(2); + allocated.emplace_back(3); + tracker.ResetCopiesMovesSwaps(); + + inlined.swap(allocated); + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(tracker.live_instances(), 4); + + EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3})); +} + +TEST(NonSwappableSwapTest, InlineAndInlineMoveIndividualElements) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector inlined_a; + inlined_a.emplace_back(1); + absl::InlinedVector inlined_b; + inlined_b.emplace_back(2); + tracker.ResetCopiesMovesSwaps(); + + inlined_a.swap(inlined_b); + EXPECT_EQ(tracker.moves(), 3); + EXPECT_EQ(tracker.live_instances(), 2); + + EXPECT_THAT(inlined_a, Pointwise(HasValue(), {2})); + EXPECT_THAT(inlined_b, Pointwise(HasValue(), {1})); +} + +TEST(NonSwappableSwapTest, AllocatedAndAllocatedOnlyTransferStorage) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector allocated_a; + allocated_a.emplace_back(1); + allocated_a.emplace_back(2); + allocated_a.emplace_back(3); + absl::InlinedVector allocated_b; + allocated_b.emplace_back(4); + allocated_b.emplace_back(5); + allocated_b.emplace_back(6); + allocated_b.emplace_back(7); + tracker.ResetCopiesMovesSwaps(); + + allocated_a.swap(allocated_b); + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 7); + + EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7})); + EXPECT_THAT(allocated_b, Pointwise(HasValue(), {1, 2, 3})); +} + +TEST(NonSwappableSwapTest, SwapThis) { + using X = NonSwappableInstance; + InstanceTracker tracker; + absl::InlinedVector v; + v.emplace_back(1); + v.emplace_back(2); + v.emplace_back(3); + + tracker.ResetCopiesMovesSwaps(); + + v.swap(v); + EXPECT_EQ(tracker.moves(), 0); + EXPECT_EQ(tracker.live_instances(), 3); + + EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3})); +} + +template +using CharVec = absl::InlinedVector; + +// Warning: This struct "simulates" the type `InlinedVector::Storage::Allocated` +// to make reasonable expectations for inlined storage capacity optimization. If +// implementation changes `Allocated`, then `MySpan` and tests that use it need +// to be updated accordingly. +template +struct MySpan { + T* data; + size_t size; +}; + +TEST(StorageTest, InlinedCapacityAutoIncrease) { + // The requested capacity is auto increased to `sizeof(MySpan)`. 
+  EXPECT_GT(CharVec<1>().capacity(), 1);
+  EXPECT_EQ(CharVec<1>().capacity(), sizeof(MySpan<char>));
+  EXPECT_EQ(CharVec<1>().capacity(), CharVec<2>().capacity());
+  EXPECT_EQ(sizeof(CharVec<1>), sizeof(CharVec<2>));
+
+  // The requested capacity is auto increased to
+  // `sizeof(MySpan<int>) / sizeof(int)`.
+  EXPECT_GT((absl::InlinedVector<int, 1>().capacity()), 1);
+  EXPECT_EQ((absl::InlinedVector<int, 1>().capacity()),
+            sizeof(MySpan<int>) / sizeof(int));
+}
+
+}  // anonymous namespace
diff --git a/third_party/absl/absl/container/internal/btree.h b/third_party/absl/absl/container/internal/btree.h
new file mode 100644
index 000000000..91df57a37
--- /dev/null
+++ b/third_party/absl/absl/container/internal/btree.h
@@ -0,0 +1,3053 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A btree implementation of the STL set and map interfaces. A btree is smaller
+// and generally also faster than STL set/map (refer to the benchmarks below).
+// The red-black tree implementation of STL set/map has an overhead of 3
+// pointers (left, right and parent) plus the node color information for each
+// stored value. So a set<int32_t> consumes 40 bytes for each value stored in
+// 64-bit mode. This btree implementation stores multiple values on fixed
+// size nodes (usually 256 bytes) and doesn't store child pointers for leaf
+// nodes. The result is that a btree_set<int32_t> may use much less memory per
+// stored value. For the random insertion benchmark in btree_bench.cc, a
+// btree_set<int32_t> with node-size of 256 uses 5.1 bytes per stored value.
+//
+// The packing of multiple values on to each node of a btree has another effect
+// besides better space utilization: better cache locality due to fewer cache
+// lines being accessed. Better cache locality translates into faster
+// operations.
+//
+// CAVEATS
+//
+// Insertions and deletions on a btree can cause splitting, merging or
+// rebalancing of btree nodes. And even without these operations, insertions
+// and deletions on a btree will move values around within a node. In both
+// cases, the result is that insertions and deletions can invalidate iterators
+// pointing to values other than the one being inserted/deleted. Therefore,
+// this container does not provide pointer stability. This is notably different
+// from STL set/map which takes care to not invalidate iterators on
+// insert/erase except, of course, for iterators pointing to the value being
+// erased. A partial workaround when erasing is available: erase() returns an
+// iterator pointing to the item just after the one that was erased (or end()
+// if none exists).
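+//
+// As a minimal sketch of that workaround (assuming a `btree_set<int> s` and a
+// hypothetical predicate `ShouldErase`), erase while iterating by advancing
+// via the iterator that erase() returns rather than the invalidated one:
+//
+//   for (auto it = s.begin(); it != s.end();) {
+//     if (ShouldErase(*it)) {
+//       it = s.erase(it);  // Yields the iterator past the erased item.
+//     } else {
+//       ++it;
+//     }
+//   }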
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <new>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/common_policy_traits.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) ||   \
+    defined(ABSL_HAVE_HWADDRESS_SANITIZER) ||   \
+    defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the nodes and
+// iterators. When iterators are used, we validate that the container has not
+// been mutated since the iterator was constructed.
+#define ABSL_BTREE_ENABLE_GENERATIONS
+#endif
+
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+constexpr bool BtreeGenerationsEnabled() { return true; }
+#else
+constexpr bool BtreeGenerationsEnabled() { return false; }
+#endif
+
+template <typename Compare, typename T, typename U>
+using compare_result_t = absl::result_of_t<const Compare(const T &, const U &)>;
+
+// A helper class that indicates if the Compare parameter is a key-compare-to
+// comparator.
+template <typename Compare, typename Key>
+using btree_is_key_compare_to =
+    std::is_convertible<compare_result_t<Compare, Key, Key>,
+                        absl::weak_ordering>;
+
+struct StringBtreeDefaultLess {
+  using is_transparent = void;
+
+  StringBtreeDefaultLess() = default;
+
+  // Compatibility constructor.
+  StringBtreeDefaultLess(std::less<std::string>) {}        // NOLINT
+  StringBtreeDefaultLess(std::less<absl::string_view>) {}  // NOLINT
+
+  // Allow converting to std::less for use in key_comp()/value_comp().
+  explicit operator std::less<std::string>() const { return {}; }
+  explicit operator std::less<absl::string_view>() const { return {}; }
+  explicit operator std::less<absl::Cord>() const { return {}; }
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
+  }
+  StringBtreeDefaultLess(std::less<absl::Cord>) {}  // NOLINT
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
+  }
+};
+
+struct StringBtreeDefaultGreater {
+  using is_transparent = void;
+
+  StringBtreeDefaultGreater() = default;
+
+  StringBtreeDefaultGreater(std::greater<std::string>) {}        // NOLINT
+  StringBtreeDefaultGreater(std::greater<absl::string_view>) {}  // NOLINT
+
+  // Allow converting to std::greater for use in key_comp()/value_comp().
+  explicit operator std::greater<std::string>() const { return {}; }
+  explicit operator std::greater<absl::string_view>() const { return {}; }
+  explicit operator std::greater<absl::Cord>() const { return {}; }
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
+  }
+  StringBtreeDefaultGreater(std::greater<absl::Cord>) {}  // NOLINT
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+  }
+};
+
+// See below comments for checked_compare.
+template <typename Compare, bool Compare_is_Function =
+                                std::is_function<Compare>::value>
+struct checked_compare_base : Compare {
+  using Compare::Compare;
+  explicit checked_compare_base(Compare c) : Compare(std::move(c)) {}
+  const Compare &comp() const { return *this; }
+};
+template <typename Compare>
+struct checked_compare_base<Compare, true> {
+  explicit checked_compare_base(Compare c) : compare(std::move(c)) {}
+  const Compare &comp() const { return compare; }
+  Compare compare;
+};
+
+// A mechanism for opting out of checked_compare for use only in btree_test.cc.
+struct BtreeTestOnlyCheckedCompareOptOutBase {};
+
+// A helper class to adapt the specified comparator for two use cases:
+// (1) When using common Abseil string types with common comparison functors,
+// convert a boolean comparison into a three-way comparison that returns an
+// `absl::weak_ordering`. This helper class is specialized for
+// less<std::string>, greater<std::string>, less<string_view>,
+// greater<string_view>, less<absl::Cord>, and greater<absl::Cord>.
+// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see
+// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever
+// a comparison is made, we will make assertions to verify that the comparator
+// is valid.
+template <typename Compare, typename Key>
+struct key_compare_adapter {
+  // Inherit from checked_compare_base to support function pointers and also
+  // keep empty-base-optimization (EBO) support for classes.
+  // Note: we can't use CompressedTuple here because that would interfere
+  // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a
+  // CompressedTuple and nested `CompressedTuple`s don't support EBO.
+  // TODO(b/214288561): use CompressedTuple instead once it supports EBO for
+  // nested `CompressedTuple`s.
+  struct checked_compare : checked_compare_base<Compare> {
+   private:
+    using Base = typename checked_compare::checked_compare_base;
+    using Base::comp;
+
+    // If possible, returns whether `t` is equivalent to itself. We can only do
+    // this for `Key`s because we can't be sure that it's safe to call
+    // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a
+    // compilation failure inside the implementation of the comparison
+    // operator.
+    bool is_self_equivalent(const Key &k) const {
+      // Note: this works for both boolean and three-way comparators.
+      return comp()(k, k) == 0;
+    }
+    // If we can't compare `t` with itself, returns true unconditionally.
+    template <typename T>
+    bool is_self_equivalent(const T &) const {
+      return true;
+    }
+
+   public:
+    using Base::Base;
+    checked_compare(Compare comp) : Base(std::move(comp)) {}  // NOLINT
+
+    // Allow converting to Compare for use in key_comp()/value_comp().
+    explicit operator Compare() const { return comp(); }
+
+    template <typename T, typename U,
+              absl::enable_if_t<
+                  std::is_same<bool, compare_result_t<Compare, T, U>>::value,
+                  int> = 0>
+    bool operator()(const T &lhs, const U &rhs) const {
+      // NOTE: if any of these assertions fail, then the comparator does not
+      // establish a strict-weak-ordering (see
+      // https://en.cppreference.com/w/cpp/named_req/Compare).
+      assert(is_self_equivalent(lhs));
+      assert(is_self_equivalent(rhs));
+      const bool lhs_comp_rhs = comp()(lhs, rhs);
+      assert(!lhs_comp_rhs || !comp()(rhs, lhs));
+      return lhs_comp_rhs;
+    }
+
+    template <
+        typename T, typename U,
+        absl::enable_if_t<std::is_convertible<compare_result_t<Compare, T, U>,
+                                              absl::weak_ordering>::value,
+                          int> = 0>
+    absl::weak_ordering operator()(const T &lhs, const U &rhs) const {
+      // NOTE: if any of these assertions fail, then the comparator does not
+      // establish a strict-weak-ordering (see
+      // https://en.cppreference.com/w/cpp/named_req/Compare).
+      assert(is_self_equivalent(lhs));
+      assert(is_self_equivalent(rhs));
+      const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs);
+#ifndef NDEBUG
+      const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs);
+      if (lhs_comp_rhs > 0) {
+        assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0");
+      } else if (lhs_comp_rhs == 0) {
+        assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0");
+      } else {
+        assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+      }
+#endif
+      return lhs_comp_rhs;
+    }
+  };
+  using type = absl::conditional_t<
+      std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase, Compare>::value,
+      Compare, checked_compare>;
+};
+
+template <>
+struct key_compare_adapter<std::less<std::string>, std::string> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_adapter<std::greater<std::string>, std::string> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_adapter<std::less<absl::string_view>, absl::string_view> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_adapter<std::greater<absl::string_view>, absl::string_view> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_adapter<std::less<absl::Cord>, absl::Cord> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_adapter<std::greater<absl::Cord>, absl::Cord> {
+  using type = StringBtreeDefaultGreater;
+};
+
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+// For example, this would be useful for key types that wrap an integer
+// and define their own cheap operator<(). For example:
+//
+//   class K {
+//    public:
+//     using absl_btree_prefer_linear_node_search = std::true_type;
+//     ...
+//    private:
+//     friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+//     int k_;
+//   };
+//
+//   btree_map<K, V> m;  // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : T::absl_btree_prefer_linear_node_search {};
+
+template <typename Compare, typename Key>
+constexpr bool compare_has_valid_result_type() {
+  using compare_result_type = compare_result_t<Compare, Key, Key>;
+  return std::is_same<compare_result_type, bool>::value ||
+         std::is_convertible<compare_result_type, absl::weak_ordering>::value;
+}
+
+template <typename original_key_compare, typename value_type>
+class map_value_compare {
+  template <typename Params>
+  friend class btree;
+
+  // Note: this `protected` is part of the API of std::map::value_compare. See
+  // https://en.cppreference.com/w/cpp/container/map/value_compare.
+ protected:
+  explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {}
+
+  original_key_compare comp;  // NOLINT
+
+ public:
+  auto operator()(const value_type &lhs, const value_type &rhs) const
+      -> decltype(comp(lhs.first, rhs.first)) {
+    return comp(lhs.first, rhs.first);
+  }
+};
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool IsMulti, bool IsMap, typename SlotPolicy>
+struct common_params : common_policy_traits<SlotPolicy> {
+  using original_key_compare = Compare;
+
+  // If Compare is a common comparator for a string-like type, then we adapt it
+  // to use heterogeneous lookup and to be a key-compare-to comparator.
+  // We also adapt the comparator to diagnose invalid comparators in debug
+  // mode. We disable this when `Compare` is invalid in a way that will cause
+  // adaptation to fail (having invalid return type) so that we can give a
+  // better compilation failure in static_assert_validation. If we don't do
+  // this, then there will be cascading compilation failures that are confusing
+  // for users.
+  using key_compare =
+      absl::conditional_t<!compare_has_valid_result_type<Compare, Key>(),
+                          Compare,
+                          typename key_compare_adapter<Compare, Key>::type>;
+
+  static constexpr bool kIsKeyCompareStringAdapted =
+      std::is_same<key_compare, StringBtreeDefaultLess>::value ||
+      std::is_same<key_compare, StringBtreeDefaultGreater>::value;
+  static constexpr bool kIsKeyCompareTransparent =
+      IsTransparent<original_key_compare>::value || kIsKeyCompareStringAdapted;
+
+  // A type which indicates if we have a key-compare-to functor or a plain old
+  // key-compare functor.
+  using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
+
+  using allocator_type = Alloc;
+  using key_type = Key;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  using slot_policy = SlotPolicy;
+  using slot_type = typename slot_policy::slot_type;
+  using value_type = typename slot_policy::value_type;
+  using init_type = typename slot_policy::mutable_value_type;
+  using pointer = value_type *;
+  using const_pointer = const value_type *;
+  using reference = value_type &;
+  using const_reference = const value_type &;
+
+  using value_compare =
+      absl::conditional_t<IsMap,
+                          map_value_compare<original_key_compare, value_type>,
+                          original_key_compare>;
+  using is_map_container = std::integral_constant<bool, IsMap>;
+
+  // For the given lookup key type, returns whether we can have multiple
+  // equivalent keys in the btree. If this is a multi-container, then we can.
+  // Otherwise, we can have multiple equivalent keys only if all of the
+  // following conditions are met:
+  // - The comparator is transparent.
+  // - The lookup key type is not the same as key_type.
+  // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
+  //   that we know has the same equivalence classes for all lookup types.
+  template <typename LookupKey>
+  constexpr static bool can_have_multiple_equivalent_keys() {
+    return IsMulti || (IsTransparent<key_compare>::value &&
+                       !std::is_same<LookupKey, Key>::value &&
+                       !kIsKeyCompareStringAdapted);
+  }
+
+  enum {
+    kTargetNodeSize = TargetNodeSize,
+
+    // Upper bound for the available space for slots. This is largest for leaf
+    // nodes, which have overhead of at least a pointer + 4 bytes (for storing
+    // 3 field_types and an enum).
+    kNodeSlotSpace = TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+  };
+
+  // This is an integral type large enough to hold as many slots as will fit a
+  // node of TargetNodeSize bytes.
+  using node_count_type =
+      absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) >
+                           (std::numeric_limits<uint8_t>::max)()),
+                          uint16_t, uint8_t>;  // NOLINT
+};
+
+// An adapter class that converts a lower-bound compare into an upper-bound
+// compare.
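+// For illustration (editorial sketch, not upstream text): wrapping
+// std::less<int> this way yields a predicate that is true exactly when
+// `a <= b`:
+//
+//   upper_bound_adapter<std::less<int>> upper(std::less<int>{});
+//   upper(3, 3);  // true: 3 is not greater than 3.
+//   upper(4, 3);  // false: 4 is greater than 3.
+//
+// That is the predicate an upper_bound-style search needs in order to find
+// the first element strictly greater than the search key.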
+// Note: there is no need to make a version of this adapter specialized
+// for key-compare-to functors because the upper-bound (the first value greater
+// than the input) is never an exact match.
+template <typename Compare>
+struct upper_bound_adapter {
+  explicit upper_bound_adapter(const Compare &c) : comp(c) {}
+  template <typename K1, typename K2>
+  bool operator()(const K1 &a, const K2 &b) const {
+    // Returns true when a is not greater than b.
+    return !compare_internal::compare_result_as_less_than(comp(b, a));
+  }
+
+ private:
+  Compare comp;
+};
+
+enum class MatchKind : uint8_t { kEq, kNe };
+
+template <typename V, bool IsCompareTo>
+struct SearchResult {
+  V value;
+  MatchKind match;
+
+  static constexpr bool HasMatch() { return true; }
+  bool IsEq() const { return match == MatchKind::kEq; }
+};
+
+// When we don't use CompareTo, `match` is not present.
+// This ensures that callers can't use it accidentally when it provides no
+// useful information.
+template <typename V>
+struct SearchResult<V, false> {
+  SearchResult() {}
+  explicit SearchResult(V v) : value(v) {}
+  SearchResult(V v, MatchKind /*match*/) : value(v) {}
+
+  V value;
+
+  static constexpr bool HasMatch() { return false; }
+  static constexpr bool IsEq() { return false; }
+};
+
+// A node in the btree. The same node type is used for both internal and leaf
+// nodes in the btree, though the nodes are allocated in such a way that the
+// children array is only valid in internal nodes.
+template <typename Params>
+class btree_node {
+  using is_key_compare_to = typename Params::is_key_compare_to;
+  using field_type = typename Params::node_count_type;
+  using allocator_type = typename Params::allocator_type;
+  using slot_type = typename Params::slot_type;
+  using original_key_compare = typename Params::original_key_compare;
+
+ public:
+  using params_type = Params;
+  using key_type = typename Params::key_type;
+  using value_type = typename Params::value_type;
+  using pointer = typename Params::pointer;
+  using const_pointer = typename Params::const_pointer;
+  using reference = typename Params::reference;
+  using const_reference = typename Params::const_reference;
+  using key_compare = typename Params::key_compare;
+  using size_type = typename Params::size_type;
+  using difference_type = typename Params::difference_type;
+
+  // Btree decides whether to use linear node search as follows:
+  //   - If the comparator expresses a preference, use that.
+  //   - If the key expresses a preference, use that.
+  //   - If the key is arithmetic and the comparator is std::less or
+  //     std::greater, choose linear.
+  //   - Otherwise, choose binary.
+  // TODO(ezb): Might make sense to add condition(s) based on node-size.
+  using use_linear_search = std::integral_constant<
+      bool, has_linear_node_search_preference<original_key_compare>::value
+                ? prefers_linear_node_search<original_key_compare>::value
+            : has_linear_node_search_preference<key_type>::value
+                ? prefers_linear_node_search<key_type>::value
+                : std::is_arithmetic<key_type>::value &&
+                      (std::is_same<std::less<key_type>,
+                                    original_key_compare>::value ||
+                       std::is_same<std::greater<key_type>,
+                                    original_key_compare>::value)>;
+
+  // This class is organized by absl::container_internal::Layout as if it had
+  // the following structure:
+  //   // A pointer to the node's parent.
+  //   btree_node *parent;
+  //
+  //   // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a
+  //   // generation integer in order to check that when iterators are
+  //   // used, they haven't been invalidated already. Only the generation on
+  //   // the root is used, but we have one on each node because whether a node
+  //   // is root or not can change.
+  //   uint32_t generation;
+  //
+  //   // The position of the node in the node's parent.
+  //   field_type position;
+  //   // The index of the first populated value in `values`.
+  //   // TODO(ezb): right now, `start` is always 0. Update insertion/merge
+  //   // logic to allow for floating storage within nodes.
+  //   field_type start;
+  //   // The index after the last populated value in `values`. Currently, this
+  //   // is the same as the count of values.
+  //   field_type finish;
+  //   // The maximum number of values the node can hold. This is an integer in
+  //   // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
+  //   // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
+  //   // nodes (even though there are still kNodeSlots values in the node).
+  //   // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
+  //   // to free extra bits for is_root, etc.
+  //   field_type max_count;
+  //
+  //   // The array of values. The capacity is `max_count` for leaf nodes and
+  //   // kNodeSlots for internal nodes. Only the values in
+  //   // [start, finish) have been initialized and are valid.
+  //   slot_type values[max_count];
+  //
+  //   // The array of child pointers. The keys in children[i] are all less
+  //   // than key(i). The keys in children[i + 1] are all greater than key(i).
+  //   // There are 0 children for leaf nodes and kNodeSlots + 1 children for
+  //   // internal nodes.
+  //   btree_node *children[kNodeSlots + 1];
+  //
+  // This class is only constructed by EmptyNodeType. Normally, pointers to the
+  // layout above are allocated, cast to btree_node*, and de-allocated within
+  // the btree implementation.
+  ~btree_node() = default;
+  btree_node(btree_node const &) = delete;
+  btree_node &operator=(btree_node const &) = delete;
+
+ protected:
+  btree_node() = default;
+
+ private:
+  using layout_type =
+      absl::container_internal::Layout<btree_node *, uint32_t, field_type,
+                                       slot_type, btree_node *>;
+  constexpr static size_type SizeWithNSlots(size_type n) {
+    return layout_type(
+               /*parent*/ 1,
+               /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
+               /*position, start, finish, max_count*/ 4,
+               /*slots*/ n,
+               /*children*/ 0)
+        .AllocSize();
+  }
+  // A lower bound for the overhead of fields other than slots in a leaf node.
+  constexpr static size_type MinimumOverhead() {
+    return SizeWithNSlots(1) - sizeof(slot_type);
+  }
+
+  // Compute how many values we can fit onto a leaf node taking into account
+  // padding.
+  constexpr static size_type NodeTargetSlots(const size_type begin,
+                                             const size_type end) {
+    return begin == end ? begin
+           : SizeWithNSlots((begin + end) / 2 + 1) >
+                   params_type::kTargetNodeSize
+               ? NodeTargetSlots(begin, (begin + end) / 2)
+               : NodeTargetSlots((begin + end) / 2 + 1, end);
+  }
+
+  constexpr static size_type kTargetNodeSize = params_type::kTargetNodeSize;
+  constexpr static size_type kNodeTargetSlots =
+      NodeTargetSlots(0, kTargetNodeSize);
+
+  // We need a minimum of 3 slots per internal node in order to perform
+  // splitting (1 value for the two nodes involved in the split and 1 value
+  // propagated to the parent as the delimiter for the split). For performance
+  // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
+  // of 1/3 (for a node, not a b-tree).
+  constexpr static size_type kMinNodeSlots = 4;
+
+  constexpr static size_type kNodeSlots =
+      kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots;
+
+  // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+  // has this value.
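+  // (Leaf nodes always have a `max_count` of at least 1, so 0 is unambiguous
+  // as a sentinel for internal nodes.)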
+  constexpr static field_type kInternalNodeMaxCount = 0;
+
+  constexpr static layout_type Layout(const size_type slot_count,
+                                      const size_type child_count) {
+    return layout_type(
+        /*parent*/ 1,
+        /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
+        /*position, start, finish, max_count*/ 4,
+        /*slots*/ slot_count,
+        /*children*/ child_count);
+  }
+  // Leaves can have less than kNodeSlots values.
+  constexpr static layout_type LeafLayout(
+      const size_type slot_count = kNodeSlots) {
+    return Layout(slot_count, 0);
+  }
+  constexpr static layout_type InternalLayout() {
+    return Layout(kNodeSlots, kNodeSlots + 1);
+  }
+  constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
+    return LeafLayout(slot_count).AllocSize();
+  }
+  constexpr static size_type InternalSize() {
+    return InternalLayout().AllocSize();
+  }
+
+  constexpr static size_type Alignment() {
+    static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
+                  "Alignment of all nodes must be equal.");
+    return InternalLayout().Alignment();
+  }
+
+  // N is the index of the type in the Layout definition.
+  // ElementType<N> is the Nth type in the Layout definition.
+  template <size_type N>
+  inline typename layout_type::template ElementType<N> *GetField() {
+    // We assert that we don't read from values that aren't there.
+    assert(N < 4 || is_internal());
+    return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
+  }
+  template <size_type N>
+  inline const typename layout_type::template ElementType<N> *GetField() const {
+    assert(N < 4 || is_internal());
+    return InternalLayout().template Pointer<N>(
+        reinterpret_cast<const char *>(this));
+  }
+  void set_parent(btree_node *p) { *GetField<0>() = p; }
+  field_type &mutable_finish() { return GetField<2>()[2]; }
+  slot_type *slot(size_type i) { return &GetField<3>()[i]; }
+  slot_type *start_slot() { return slot(start()); }
+  slot_type *finish_slot() { return slot(finish()); }
+  const slot_type *slot(size_type i) const { return &GetField<3>()[i]; }
+  void set_position(field_type v) { GetField<2>()[0] = v; }
+  void set_start(field_type v) { GetField<2>()[1] = v; }
+  void set_finish(field_type v) { GetField<2>()[2] = v; }
+  // This method is only called by the node init methods.
+  void set_max_count(field_type v) { GetField<2>()[3] = v; }
+
+ public:
+  // Whether this is a leaf node or not. This value doesn't change after the
+  // node is created.
+  bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; }
+  // Whether this is an internal node or not. This value doesn't change after
+  // the node is created.
+  bool is_internal() const { return !is_leaf(); }
+
+  // Getter for the position of this node in its parent.
+  field_type position() const { return GetField<2>()[0]; }
+
+  // Getter for the offset of the first value in the `values` array.
+  field_type start() const {
+    // TODO(ezb): when floating storage is implemented, return GetField<2>()[1];
+    assert(GetField<2>()[1] == 0);
+    return 0;
+  }
+
+  // Getter for the offset after the last value in the `values` array.
+  field_type finish() const { return GetField<2>()[2]; }
+
+  // Getters for the number of values stored in this node.
+  field_type count() const {
+    assert(finish() >= start());
+    return finish() - start();
+  }
+  field_type max_count() const {
+    // Internal nodes have max_count==kInternalNodeMaxCount.
+    // Leaf nodes have max_count in [1, kNodeSlots].
+    const field_type max_count = GetField<2>()[3];
+    return max_count == field_type{kInternalNodeMaxCount}
+               ? field_type{kNodeSlots}
+               : max_count;
+  }
+
+  // Getter for the parent of this node.
+  btree_node *parent() const { return *GetField<0>(); }
+  // Getter for whether the node is the root of the tree. The parent of the
+  // root of the tree is the leftmost node in the tree which is guaranteed to
+  // be a leaf.
+  bool is_root() const { return parent()->is_leaf(); }
+  void make_root() {
+    assert(parent()->is_root());
+    set_generation(parent()->generation());
+    set_parent(parent()->parent());
+  }
+
+  // Gets the root node's generation integer, which is the one used by the
+  // tree.
+  uint32_t *get_root_generation() const {
+    assert(BtreeGenerationsEnabled());
+    const btree_node *curr = this;
+    for (; !curr->is_root(); curr = curr->parent()) continue;
+    return const_cast<uint32_t *>(&curr->GetField<1>()[0]);
+  }
+
+  // Returns the generation for iterator validation.
+  uint32_t generation() const {
+    return BtreeGenerationsEnabled() ? *get_root_generation() : 0;
+  }
+  // Updates generation. Should only be called on a root node or during node
+  // initialization.
+  void set_generation(uint32_t generation) {
+    if (BtreeGenerationsEnabled()) GetField<1>()[0] = generation;
+  }
+  // Updates the generation. We do this whenever the node is mutated.
+  void next_generation() {
+    if (BtreeGenerationsEnabled()) ++*get_root_generation();
+  }
+
+  // Getters for the key/value at position i in the node.
+  const key_type &key(size_type i) const { return params_type::key(slot(i)); }
+  reference value(size_type i) { return params_type::element(slot(i)); }
+  const_reference value(size_type i) const {
+    return params_type::element(slot(i));
+  }
+
+  // Getters/setter for the child at position i in the node.
+  btree_node *child(field_type i) const { return GetField<4>()[i]; }
+  btree_node *start_child() const { return child(start()); }
+  btree_node *&mutable_child(field_type i) { return GetField<4>()[i]; }
+  void clear_child(field_type i) {
+    absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
+  }
+  void set_child_noupdate_position(field_type i, btree_node *c) {
+    absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
+    mutable_child(i) = c;
+  }
+  void set_child(field_type i, btree_node *c) {
+    set_child_noupdate_position(i, c);
+    c->set_position(i);
+  }
+  void init_child(field_type i, btree_node *c) {
+    set_child(i, c);
+    c->set_parent(this);
+  }
+
+  // Returns the position of the first value whose key is not less than k.
+  template <typename K>
+  SearchResult<size_type, is_key_compare_to::value> lower_bound(
+      const K &k, const key_compare &comp) const {
+    return use_linear_search::value ? linear_search(k, comp)
+                                    : binary_search(k, comp);
+  }
+  // Returns the position of the first value whose key is greater than k.
+  template <typename K>
+  size_type upper_bound(const K &k, const key_compare &comp) const {
+    auto upper_compare = upper_bound_adapter<key_compare>(comp);
+    return use_linear_search::value ? linear_search(k, upper_compare).value
+                                    : binary_search(k, upper_compare).value;
+  }
+
+  template <typename K, typename Compare>
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
+  linear_search(const K &k, const Compare &comp) const {
+    return linear_search_impl(k, start(), finish(), comp,
+                              btree_is_key_compare_to<Compare, key_type>());
+  }
+
+  template <typename K, typename Compare>
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
+  binary_search(const K &k, const Compare &comp) const {
+    return binary_search_impl(k, start(), finish(), comp,
+                              btree_is_key_compare_to<Compare, key_type>());
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // linear search performed using plain compare.
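+  // For example (editorial illustration): searching for k = 3 among keys
+  // [1, 3, 3, 5] stops at position 1, the first key for which
+  // `comp(key, 3)` is false.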
+  template <typename K, typename Compare>
+  SearchResult<size_type, false> linear_search_impl(
+      const K &k, size_type s, const size_type e, const Compare &comp,
+      std::false_type /* IsCompareTo */) const {
+    while (s < e) {
+      if (!comp(key(s), k)) {
+        break;
+      }
+      ++s;
+    }
+    return SearchResult<size_type, false>{s};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // linear search performed using compare-to.
+  template <typename K, typename Compare>
+  SearchResult<size_type, true> linear_search_impl(
+      const K &k, size_type s, const size_type e, const Compare &comp,
+      std::true_type /* IsCompareTo */) const {
+    while (s < e) {
+      const absl::weak_ordering c = comp(key(s), k);
+      if (c == 0) {
+        return {s, MatchKind::kEq};
+      } else if (c > 0) {
+        break;
+      }
+      ++s;
+    }
+    return {s, MatchKind::kNe};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // binary search performed using plain compare.
+  template <typename K, typename Compare>
+  SearchResult<size_type, false> binary_search_impl(
+      const K &k, size_type s, size_type e, const Compare &comp,
+      std::false_type /* IsCompareTo */) const {
+    while (s != e) {
+      const size_type mid = (s + e) >> 1;
+      if (comp(key(mid), k)) {
+        s = mid + 1;
+      } else {
+        e = mid;
+      }
+    }
+    return SearchResult<size_type, false>{s};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // binary search performed using compare-to.
+  template <typename K, typename CompareTo>
+  SearchResult<size_type, true> binary_search_impl(
+      const K &k, size_type s, size_type e, const CompareTo &comp,
+      std::true_type /* IsCompareTo */) const {
+    if (params_type::template can_have_multiple_equivalent_keys<K>()) {
+      MatchKind exact_match = MatchKind::kNe;
+      while (s != e) {
+        const size_type mid = (s + e) >> 1;
+        const absl::weak_ordering c = comp(key(mid), k);
+        if (c < 0) {
+          s = mid + 1;
+        } else {
+          e = mid;
+          if (c == 0) {
+            // Need to return the first value whose key is not less than k,
+            // which requires continuing the binary search if there could be
+            // multiple equivalent keys.
+            exact_match = MatchKind::kEq;
+          }
+        }
+      }
+      return {s, exact_match};
+    } else {  // Can't have multiple equivalent keys.
+      while (s != e) {
+        const size_type mid = (s + e) >> 1;
+        const absl::weak_ordering c = comp(key(mid), k);
+        if (c < 0) {
+          s = mid + 1;
+        } else if (c > 0) {
+          e = mid;
+        } else {
+          return {mid, MatchKind::kEq};
+        }
+      }
+      return {s, MatchKind::kNe};
+    }
+  }
+
+  // Returns whether key i is ordered correctly with respect to the other keys
+  // in the node. The motivation here is to detect comparators that violate
+  // transitivity. Note: we only do comparisons of keys on this node rather
+  // than the whole tree so that this is constant time.
+  template <typename Compare>
+  bool is_ordered_correctly(field_type i, const Compare &comp) const {
+    if (std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase,
+                        Compare>::value ||
+        params_type::kIsKeyCompareStringAdapted) {
+      return true;
+    }
+
+    const auto compare = [&](field_type a, field_type b) {
+      const absl::weak_ordering cmp =
+          compare_internal::do_three_way_comparison(comp, key(a), key(b));
+      return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+    };
+    int cmp = -1;
+    constexpr bool kCanHaveEquivKeys =
+        params_type::template can_have_multiple_equivalent_keys<key_type>();
+    for (field_type j = start(); j < finish(); ++j) {
+      if (j == i) {
+        if (cmp > 0) return false;
+        continue;
+      }
+      int new_cmp = compare(j, i);
+      if (new_cmp < cmp || (!kCanHaveEquivKeys && new_cmp == 0)) return false;
+      cmp = new_cmp;
+    }
+    return true;
+  }
+
+  // Emplaces a value at position i, shifting all existing values and
+  // children at positions >= i to the right by 1.
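+  // For example (editorial illustration): emplacing X at i = 1 in a leaf
+  // holding [A, B, C] shifts B and C right, yielding [A, X, B, C].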
+  template <typename... Args>
+  void emplace_value(field_type i, allocator_type *alloc, Args &&...args);
+
+  // Removes the values at positions [i, i + to_erase), shifting all existing
+  // values and children after that range to the left by to_erase. Clears all
+  // children between [i, i + to_erase).
+  void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
+
+  // Rebalances a node with its right sibling.
+  void rebalance_right_to_left(field_type to_move, btree_node *right,
+                               allocator_type *alloc);
+  void rebalance_left_to_right(field_type to_move, btree_node *right,
+                               allocator_type *alloc);
+
+  // Splits a node, moving a portion of the node's values to its right sibling.
+  void split(int insert_position, btree_node *dest, allocator_type *alloc);
+
+  // Merges a node with its right sibling, moving all of the values and the
+  // delimiting key in the parent node onto itself, and deleting the src node.
+  void merge(btree_node *src, allocator_type *alloc);
+
+  // Node allocation/deletion routines.
+  void init_leaf(field_type position, field_type max_count,
+                 btree_node *parent) {
+    set_generation(0);
+    set_parent(parent);
+    set_position(position);
+    set_start(0);
+    set_finish(0);
+    set_max_count(max_count);
+    absl::container_internal::SanitizerPoisonMemoryRegion(
+        start_slot(), max_count * sizeof(slot_type));
+  }
+  void init_internal(field_type position, btree_node *parent) {
+    init_leaf(position, kNodeSlots, parent);
+    // Set `max_count` to a sentinel value to indicate that this node is
+    // internal.
+    set_max_count(kInternalNodeMaxCount);
+    absl::container_internal::SanitizerPoisonMemoryRegion(
+        &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
+  }
+
+  static void deallocate(const size_type size, btree_node *node,
+                         allocator_type *alloc) {
+    absl::container_internal::SanitizerUnpoisonMemoryRegion(node, size);
+    absl::container_internal::Deallocate<alignof(btree_node)>(alloc, node,
+                                                              size);
+  }
+
+  // Deletes a node and all of its children.
+  static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
+ private:
+  template <typename... Args>
+  void value_init(const field_type i, allocator_type *alloc, Args &&...args) {
+    next_generation();
+    absl::container_internal::SanitizerUnpoisonObject(slot(i));
+    params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
+  }
+  void value_destroy(const field_type i, allocator_type *alloc) {
+    next_generation();
+    params_type::destroy(alloc, slot(i));
+    absl::container_internal::SanitizerPoisonObject(slot(i));
+  }
+  void value_destroy_n(const field_type i, const field_type n,
+                       allocator_type *alloc) {
+    next_generation();
+    for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+      params_type::destroy(alloc, s);
+      absl::container_internal::SanitizerPoisonObject(s);
+    }
+  }
+
+  static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+    absl::container_internal::SanitizerUnpoisonObject(dest);
+    params_type::transfer(alloc, dest, src);
+    absl::container_internal::SanitizerPoisonObject(src);
+  }
+
+  // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in
+  // `this`.
+  void transfer(const size_type dest_i, const size_type src_i,
+                btree_node *src_node, allocator_type *alloc) {
+    next_generation();
+    transfer(slot(dest_i), src_node->slot(src_i), alloc);
+  }
+
+  // Transfers `n` values starting at value `src_i` in `src_node` into the
+  // values starting at value `dest_i` in `this`.
+  void transfer_n(const size_type n, const size_type dest_i,
+                  const size_type src_i, btree_node *src_node,
+                  allocator_type *alloc) {
+    next_generation();
+    for (slot_type *src = src_node->slot(src_i), *end = src + n,
+                   *dest = slot(dest_i);
+         src != end; ++src, ++dest) {
+      transfer(dest, src, alloc);
+    }
+  }
+
+  // Same as above, except that we start at the end and work our way to the
+  // beginning.
+  void transfer_n_backward(const size_type n, const size_type dest_i,
+                           const size_type src_i, btree_node *src_node,
+                           allocator_type *alloc) {
+    next_generation();
+    for (slot_type *src = src_node->slot(src_i + n), *end = src - n,
+                   *dest = slot(dest_i + n);
+         src != end; --src, --dest) {
+      // If we modified the loop index calculations above to avoid the -1s
+      // here, it would result in UB in the computation of `end` (and possibly
+      // `src` as well, if n == 0), since slot() is effectively an array index
+      // and it is UB to compute the address of any out-of-bounds array element
+      // except for one-past-the-end.
+      transfer(dest - 1, src - 1, alloc);
+    }
+  }
+
+  template <typename P>
+  friend class btree;
+  template <typename N, typename R, typename P>
+  friend class btree_iterator;
+  friend class BtreeNodePeer;
+  friend struct btree_access;
+};
+
+template <typename Node>
+bool AreNodesFromSameContainer(const Node *node_a, const Node *node_b) {
+  // If either node is null, then give up on checking whether they're from the
+  // same container. (If exactly one is null, then we'll trigger the
+  // default-constructed assert in Equals.)
+  if (node_a == nullptr || node_b == nullptr) return true;
+  while (!node_a->is_root()) node_a = node_a->parent();
+  while (!node_b->is_root()) node_b = node_b->parent();
+  return node_a == node_b;
+}
+
+class btree_iterator_generation_info_enabled {
+ public:
+  explicit btree_iterator_generation_info_enabled(uint32_t g)
+      : generation_(g) {}
+
+  // Updates the generation. For use internally right before we return an
+  // iterator to the user.
+  template <typename Node>
+  void update_generation(const Node *node) {
+    if (node != nullptr) generation_ = node->generation();
+  }
+  uint32_t generation() const { return generation_; }
+
+  template <typename Node>
+  void assert_valid_generation(const Node *node) const {
+    if (node != nullptr && node->generation() != generation_) {
+      ABSL_INTERNAL_LOG(
+          FATAL,
+          "Attempting to use an invalidated iterator. The corresponding b-tree "
+          "container has been mutated since this iterator was constructed.");
+    }
+  }
+
+ private:
+  // Used to check that the iterator hasn't been invalidated.
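+  // For example (editorial sketch): in a sanitizer build, a stale iterator is
+  // caught like so:
+  //
+  //   auto it = set.begin();  // Copies the root's generation, N.
+  //   set.insert(42);         // next_generation() bumps the root to N + 1.
+  //   *it;                    // assert_valid_generation() sees N != N + 1
+  //                           // and logs a fatal error.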
+  uint32_t generation_;
+};
+
+class btree_iterator_generation_info_disabled {
+ public:
+  explicit btree_iterator_generation_info_disabled(uint32_t) {}
+  static void update_generation(const void *) {}
+  static uint32_t generation() { return 0; }
+  static void assert_valid_generation(const void *) {}
+};
+
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+using btree_iterator_generation_info = btree_iterator_generation_info_enabled;
+#else
+using btree_iterator_generation_info = btree_iterator_generation_info_disabled;
+#endif
+
+template <typename Node, typename Reference, typename Pointer>
+class btree_iterator : private btree_iterator_generation_info {
+  using field_type = typename Node::field_type;
+  using key_type = typename Node::key_type;
+  using size_type = typename Node::size_type;
+  using params_type = typename Node::params_type;
+  using is_map_container = typename params_type::is_map_container;
+
+  using node_type = Node;
+  using normal_node = typename std::remove_const<Node>::type;
+  using const_node = const Node;
+  using normal_pointer = typename params_type::pointer;
+  using normal_reference = typename params_type::reference;
+  using const_pointer = typename params_type::const_pointer;
+  using const_reference = typename params_type::const_reference;
+  using slot_type = typename params_type::slot_type;
+
+  // In sets, all iterators are const.
+  using iterator = absl::conditional_t<
+      is_map_container::value,
+      btree_iterator<normal_node, normal_reference, normal_pointer>,
+      btree_iterator<normal_node, const_reference, const_pointer>>;
+  using const_iterator =
+      btree_iterator<const_node, const_reference, const_pointer>;
+
+ public:
+  // These aliases are public for std::iterator_traits.
+  using difference_type = typename Node::difference_type;
+  using value_type = typename params_type::value_type;
+  using pointer = Pointer;
+  using reference = Reference;
+  using iterator_category = std::bidirectional_iterator_tag;
+
+  btree_iterator() : btree_iterator(nullptr, -1) {}
+  explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {}
+  btree_iterator(Node *n, int p)
+      : btree_iterator_generation_info(n != nullptr ? n->generation()
+                                                    : ~uint32_t{}),
+        node_(n),
+        position_(p) {}
+
+  // NOTE: this SFINAE allows for implicit conversions from iterator to
+  // const_iterator, but it specifically avoids hiding the copy constructor so
+  // that the trivial one will be used when possible.
+  template <typename N, typename R, typename P,
+            absl::enable_if_t<
+                std::is_same<btree_iterator<N, R, P>, iterator>::value &&
+                    std::is_same<btree_iterator, const_iterator>::value,
+                int> = 0>
+  btree_iterator(const btree_iterator<N, R, P> other)  // NOLINT
+      : btree_iterator_generation_info(other),
+        node_(other.node_),
+        position_(other.position_) {}
+
+  bool operator==(const iterator &other) const { return Equals(other); }
+  bool operator==(const const_iterator &other) const { return Equals(other); }
+  bool operator!=(const iterator &other) const { return !Equals(other); }
+  bool operator!=(const const_iterator &other) const { return !Equals(other); }
+
+  // Returns n such that n calls to ++other yields *this.
+  // Precondition: n exists.
+  difference_type operator-(const_iterator other) const {
+    if (node_ == other.node_) {
+      if (node_->is_leaf()) return position_ - other.position_;
+      if (position_ == other.position_) return 0;
+    }
+    return distance_slow(other);
+  }
+
+  // Accessors for the key/value the iterator is pointing at.
+  reference operator*() const {
+    ABSL_HARDENING_ASSERT(node_ != nullptr);
+    assert_valid_generation(node_);
+    ABSL_HARDENING_ASSERT(position_ >= node_->start());
+    if (position_ >= node_->finish()) {
+      ABSL_HARDENING_ASSERT(!IsEndIterator() && "Dereferencing end() iterator");
+      ABSL_HARDENING_ASSERT(position_ < node_->finish());
+    }
+    return node_->value(static_cast<field_type>(position_));
+  }
+  pointer operator->() const { return &operator*(); }
+
+  btree_iterator &operator++() {
+    increment();
+    return *this;
+  }
+  btree_iterator &operator--() {
+    decrement();
+    return *this;
+  }
+  btree_iterator operator++(int) {
+    btree_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+  btree_iterator operator--(int) {
+    btree_iterator tmp = *this;
+    --*this;
+    return tmp;
+  }
+
+ private:
+  friend iterator;
+  friend const_iterator;
+  template <typename Params>
+  friend class btree;
+  template <typename Tree>
+  friend class btree_container;
+  template <typename Tree>
+  friend class btree_set_container;
+  template <typename Tree>
+  friend class btree_map_container;
+  template <typename Tree>
+  friend class btree_multiset_container;
+  template <typename TreeType, typename CheckerType>
+  friend class base_checker;
+  friend struct btree_access;
+
+  // This SFINAE allows explicit conversions from const_iterator to
+  // iterator, but also avoids hiding the copy constructor.
+  // NOTE: the const_cast is safe because this constructor is only called by
+  // non-const methods and the container owns the nodes.
+  template <typename N, typename R, typename P,
+            absl::enable_if_t<
+                std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+                    std::is_same<btree_iterator, iterator>::value,
+                int> = 0>
+  explicit btree_iterator(const btree_iterator<N, R, P> other)
+      : btree_iterator_generation_info(other.generation()),
+        node_(const_cast<node_type *>(other.node_)),
+        position_(other.position_) {}
+
+  bool Equals(const const_iterator other) const {
+    ABSL_HARDENING_ASSERT(((node_ == nullptr && other.node_ == nullptr) ||
+                           (node_ != nullptr && other.node_ != nullptr)) &&
+                          "Comparing default-constructed iterator with "
+                          "non-default-constructed iterator.");
+    // Note: we use assert instead of ABSL_HARDENING_ASSERT here because this
+    // changes the complexity of Equals from O(1) to O(log(N) + log(M)) where
+    // N/M are sizes of the containers containing node_/other.node_.
+    assert(AreNodesFromSameContainer(node_, other.node_) &&
+           "Comparing iterators from different containers.");
+    assert_valid_generation(node_);
+    other.assert_valid_generation(other.node_);
+    return node_ == other.node_ && position_ == other.position_;
+  }
+
+  bool IsEndIterator() const {
+    if (position_ != node_->finish()) return false;
+    node_type *node = node_;
+    while (!node->is_root()) {
+      if (node->position() != node->parent()->finish()) return false;
+      node = node->parent();
+    }
+    return true;
+  }
+
+  // Returns n such that n calls to ++other yields *this.
+  // Precondition: n exists && (this->node_ != other.node_ ||
+  // !this->node_->is_leaf() || this->position_ != other.position_).
+  difference_type distance_slow(const_iterator other) const;
+
+  // Increment/decrement the iterator.
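+  // The fast path stays within one leaf node; increment_slow() and
+  // decrement_slow() handle crossing a node boundary by walking up (and back
+  // down) the tree.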
+  void increment() {
+    assert_valid_generation(node_);
+    if (node_->is_leaf() && ++position_ < node_->finish()) {
+      return;
+    }
+    increment_slow();
+  }
+  void increment_slow();
+
+  void decrement() {
+    assert_valid_generation(node_);
+    if (node_->is_leaf() && --position_ >= node_->start()) {
+      return;
+    }
+    decrement_slow();
+  }
+  void decrement_slow();
+
+  const key_type &key() const {
+    return node_->key(static_cast<size_type>(position_));
+  }
+  decltype(std::declval<Node *>()->slot(0)) slot() {
+    return node_->slot(static_cast<size_type>(position_));
+  }
+
+  void update_generation() {
+    btree_iterator_generation_info::update_generation(node_);
+  }
+
+  // The node in the tree the iterator is pointing at.
+  Node *node_;
+  // The position within the node of the tree the iterator is pointing at.
+  // NOTE: this is an int rather than a field_type because iterators can point
+  // to invalid positions (such as -1) in certain circumstances.
+  int position_;
+};
+
+template <typename Params>
+class btree {
+  using node_type = btree_node<Params>;
+  using is_key_compare_to = typename Params::is_key_compare_to;
+  using field_type = typename node_type::field_type;
+
+  // We use a static empty node for the root/leftmost/rightmost of empty btrees
+  // in order to avoid branching in begin()/end().
+  struct EmptyNodeType : node_type {
+    using field_type = typename node_type::field_type;
+    node_type *parent;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    uint32_t generation = 0;
+#endif
+    field_type position = 0;
+    field_type start = 0;
+    field_type finish = 0;
+    // max_count must be != kInternalNodeMaxCount (so that this node is
+    // regarded as a leaf node). max_count() is never called when the tree is
+    // empty.
+    field_type max_count = node_type::kInternalNodeMaxCount + 1;
+
+    constexpr EmptyNodeType() : parent(this) {}
+  };
+
+  static node_type *EmptyNode() {
+    alignas(node_type::Alignment()) static constexpr EmptyNodeType empty_node;
+    return const_cast<EmptyNodeType *>(&empty_node);
+  }
+
+  enum : uint32_t {
+    kNodeSlots = node_type::kNodeSlots,
+    kMinNodeValues = kNodeSlots / 2,
+  };
+
+  struct node_stats {
+    using size_type = typename Params::size_type;
+
+    node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
+
+    node_stats &operator+=(const node_stats &other) {
+      leaf_nodes += other.leaf_nodes;
+      internal_nodes += other.internal_nodes;
+      return *this;
+    }
+
+    size_type leaf_nodes;
+    size_type internal_nodes;
+  };
+
+ public:
+  using key_type = typename Params::key_type;
+  using value_type = typename Params::value_type;
+  using size_type = typename Params::size_type;
+  using difference_type = typename Params::difference_type;
+  using key_compare = typename Params::key_compare;
+  using original_key_compare = typename Params::original_key_compare;
+  using value_compare = typename Params::value_compare;
+  using allocator_type = typename Params::allocator_type;
+  using reference = typename Params::reference;
+  using const_reference = typename Params::const_reference;
+  using pointer = typename Params::pointer;
+  using const_pointer = typename Params::const_pointer;
+  using iterator =
+      typename btree_iterator<node_type, reference, pointer>::iterator;
+  using const_iterator = typename iterator::const_iterator;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  using node_handle_type = node_handle<Params, btree<Params>, allocator_type>;
+
+  // Internal types made public for use by btree_container types.
+  using params_type = Params;
+  using slot_type = typename Params::slot_type;
+
+ private:
+  // Copies or moves (depending on the template parameter) the values in
+  // other into this btree in their order in other. This btree must be empty
+  // before this method is called. This method is used in copy construction,
+  // copy assignment, and move assignment.
+  template <typename Btree>
+  void copy_or_move_values_in_order(Btree &other);
+
+  // Validates that various assumptions/requirements are true at compile time.
+  constexpr static bool static_assert_validation();
+
+ public:
+  btree(const key_compare &comp, const allocator_type &alloc)
+      : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {}
+
+  btree(const btree &other) : btree(other, other.allocator()) {}
+  btree(const btree &other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    copy_or_move_values_in_order(other);
+  }
+  btree(btree &&other) noexcept
+      : root_(absl::exchange(other.root_, EmptyNode())),
+        rightmost_(std::move(other.rightmost_)),
+        size_(absl::exchange(other.size_, 0u)) {
+    other.mutable_rightmost() = EmptyNode();
+  }
+  btree(btree &&other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    if (alloc == other.allocator()) {
+      swap(other);
+    } else {
+      // Move values from `other` one at a time when allocators are different.
+      copy_or_move_values_in_order(other);
+    }
+  }
+
+  ~btree() {
+    // Put static_asserts in destructor to avoid triggering them before the
+    // type is complete.
+    static_assert(static_assert_validation(), "This call must be elided.");
+    clear();
+  }
+
+  // Assign the contents of other to *this.
+  btree &operator=(const btree &other);
+  btree &operator=(btree &&other) noexcept;
+
+  iterator begin() { return iterator(leftmost()); }
+  const_iterator begin() const { return const_iterator(leftmost()); }
+  iterator end() { return iterator(rightmost(), rightmost()->finish()); }
+  const_iterator end() const {
+    return const_iterator(rightmost(), rightmost()->finish());
+  }
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  // Finds the first element whose key is not less than `key`.
+  template <typename K>
+  iterator lower_bound(const K &key) {
+    return internal_end(internal_lower_bound(key).value);
+  }
+  template <typename K>
+  const_iterator lower_bound(const K &key) const {
+    return internal_end(internal_lower_bound(key).value);
+  }
+
+  // Finds the first element whose key is not less than `key` and also returns
+  // whether that element is equal to `key`.
+  template <typename K>
+  std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+  // Finds the first element whose key is greater than `key`.
+  template <typename K>
+  iterator upper_bound(const K &key) {
+    return internal_end(internal_upper_bound(key));
+  }
+  template <typename K>
+  const_iterator upper_bound(const K &key) const {
+    return internal_end(internal_upper_bound(key));
+  }
+
+  // Finds the range of values which compare equal to key. The first member of
+  // the returned pair is equal to lower_bound(key). The second member of the
+  // pair is equal to upper_bound(key).
+  template <typename K>
+  std::pair<iterator, iterator> equal_range(const K &key);
+  template <typename K>
+  std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
+    return const_cast<btree *>(this)->equal_range(key);
+  }
+
+  // Inserts a value into the btree only if it does not already exist. The
+  // boolean return value indicates whether insertion succeeded or failed.
+  // Requirement: if `key` already exists in the btree, does not consume
+  // `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename K, typename... Args>
+  std::pair<iterator, bool> insert_unique(const K &key, Args &&...args);
+
+  // Inserts with hint. Checks to see if the value should be placed immediately
+  // before `position` in the tree. If so, then the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_unique() were made.
+  // Requirement: if `key` already exists in the btree, does not consume
+  // `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename K, typename... Args>
+  std::pair<iterator, bool> insert_hint_unique(iterator position, const K &key,
+                                               Args &&...args);
+
+  // Insert a range of values into the btree.
+  // Note: the first overload avoids constructing a value_type if the key
+  // already exists in the btree.
+  template <typename InputIterator,
+            typename = decltype(std::declval<const key_compare &>()(
+                params_type::key(*std::declval<InputIterator>()),
+                std::declval<const key_type &>()))>
+  void insert_iterator_unique(InputIterator b, InputIterator e, int);
+  // We need the second overload for cases in which we need to construct a
+  // value_type in order to compare it with the keys already in the btree.
+  template <typename InputIterator>
+  void insert_iterator_unique(InputIterator b, InputIterator e, char);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(const key_type &key, ValueType &&v);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(ValueType &&v) {
+    return insert_multi(params_type::key(v), std::forward<ValueType>(v));
+  }
+
+  // Insert with hint. Check to see if the value should be placed immediately
+  // before position in the tree. If it does, then the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_multi(v) were made.
+  template <typename ValueType>
+  iterator insert_hint_multi(iterator position, ValueType &&v);
+
+  // Insert a range of values into the btree.
+  template <typename InputIterator>
+  void insert_iterator_multi(InputIterator b, InputIterator e);
+
+  // Erase the specified iterator from the btree. The iterator must be valid
+  // (i.e. not equal to end()). Return an iterator pointing to the node after
+  // the one that was erased (or end() if none exists).
+  // Requirement: does not read the value at `*iter`.
+  iterator erase(iterator iter);
+
+  // Erases range. Returns the number of keys erased and an iterator pointing
+  // to the element after the last erased element.
+  std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
+
+  // Finds an element with key equivalent to `key` or returns `end()` if `key`
+  // is not present.
+  template <typename K>
+  iterator find(const K &key) {
+    return internal_end(internal_find(key));
+  }
+  template <typename K>
+  const_iterator find(const K &key) const {
+    return internal_end(internal_find(key));
+  }
+
+  // Clear the btree, deleting all of the values it contains.
+  void clear();
+
+  // Swaps the contents of `this` and `other`.
+  void swap(btree &other);
+
+  const key_compare &key_comp() const noexcept {
+    return rightmost_.template get<0>();
+  }
+  template <typename K1, typename K2>
+  bool compare_keys(const K1 &a, const K2 &b) const {
+    return compare_internal::compare_result_as_less_than(key_comp()(a, b));
+  }
+
+  value_compare value_comp() const {
+    return value_compare(original_key_compare(key_comp()));
+  }
+
+  // Verifies the structure of the btree.
+  void verify() const;
+
+  // Size routines.
+  size_type size() const { return size_; }
+  size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
+  bool empty() const { return size_ == 0; }
+
+  // The height of the btree. An empty tree will have height 0.
+  size_type height() const {
+    size_type h = 0;
+    if (!empty()) {
+      // Count the length of the chain from the leftmost node up to the
+      // root. We actually count from the root back around to the level below
+      // the root, but the calculation is the same because of the circularity
+      // of that traversal.
+      const node_type *n = root();
+      do {
+        ++h;
+        n = n->parent();
+      } while (n != root());
+    }
+    return h;
+  }
+
+  // The number of internal, leaf and total nodes used by the btree.
+  size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; }
+  size_type internal_nodes() const {
+    return internal_stats(root()).internal_nodes;
+  }
+  size_type nodes() const {
+    node_stats stats = internal_stats(root());
+    return stats.leaf_nodes + stats.internal_nodes;
+  }
+
+  // The total number of bytes used by the btree.
+  // TODO(b/169338300): update to support node_btree_*.
+  size_type bytes_used() const {
+    node_stats stats = internal_stats(root());
+    if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
+      return sizeof(*this) + node_type::LeafSize(root()->max_count());
+    } else {
+      return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() +
+             stats.internal_nodes * node_type::InternalSize();
+    }
+  }
+
+  // The average number of bytes used per value stored in the btree assuming
+  // random insertion order.
+  static double average_bytes_per_value() {
+    // The expected number of values per node with random insertion order is
+    // the average of the maximum and minimum numbers of values per node.
+    const double expected_values_per_node = (kNodeSlots + kMinNodeValues) / 2.0;
+    return node_type::LeafSize() / expected_values_per_node;
+  }
+
+  // The fullness of the btree. Computed as the number of elements in the btree
+  // divided by the maximum number of elements a tree with the current number
+  // of nodes could hold. A value of 1 indicates perfect space
+  // utilization. Smaller values indicate space wastage.
+  // Returns 0 for empty trees.
+  double fullness() const {
+    if (empty()) return 0.0;
+    return static_cast<double>(size()) / (nodes() * kNodeSlots);
+  }
+  // The overhead of the btree structure in bytes per node. Computed as the
+  // total number of bytes used by the btree minus the number of bytes used for
+  // storing elements divided by the number of elements.
+  // Returns 0 for empty trees.
+  double overhead() const {
+    if (empty()) return 0.0;
+    return (bytes_used() - size() * sizeof(value_type)) /
+           static_cast<double>(size());
+  }
+
+  // The allocator used by the btree.
+  allocator_type get_allocator() const { return allocator(); }
+
+ private:
+  friend struct btree_access;
+
+  // Internal accessor routines.
+  node_type *root() { return root_; }
+  const node_type *root() const { return root_; }
+  node_type *&mutable_root() noexcept { return root_; }
+  node_type *rightmost() { return rightmost_.template get<2>(); }
+  const node_type *rightmost() const { return rightmost_.template get<2>(); }
+  node_type *&mutable_rightmost() noexcept {
+    return rightmost_.template get<2>();
+  }
+  key_compare *mutable_key_comp() noexcept {
+    return &rightmost_.template get<0>();
+  }
+
+  // The leftmost node is stored as the parent of the root node.
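+  // (For an empty tree, root() is the static EmptyNodeType, whose parent
+  // points back at itself, so leftmost() remains well-defined and
+  // begin()/end() never need to branch.)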
+  node_type *leftmost() { return root()->parent(); }
+  const node_type *leftmost() const { return root()->parent(); }
+
+  // Allocator routines.
+  allocator_type *mutable_allocator() noexcept {
+    return &rightmost_.template get<1>();
+  }
+  const allocator_type &allocator() const noexcept {
+    return rightmost_.template get<1>();
+  }
+
+  // Allocates a correctly aligned node of at least size bytes using the
+  // allocator.
+  node_type *allocate(size_type size) {
+    return reinterpret_cast<node_type *>(
+        absl::container_internal::Allocate<node_type::Alignment()>(
+            mutable_allocator(), size));
+  }
+
+  // Node creation/deletion routines.
+  node_type *new_internal_node(field_type position, node_type *parent) {
+    node_type *n = allocate(node_type::InternalSize());
+    n->init_internal(position, parent);
+    return n;
+  }
+  node_type *new_leaf_node(field_type position, node_type *parent) {
+    node_type *n = allocate(node_type::LeafSize());
+    n->init_leaf(position, kNodeSlots, parent);
+    return n;
+  }
+  node_type *new_leaf_root_node(field_type max_count) {
+    node_type *n = allocate(node_type::LeafSize(max_count));
+    n->init_leaf(/*position=*/0, max_count, /*parent=*/n);
+    return n;
+  }
+
+  // Deletion helper routines.
+  iterator rebalance_after_delete(iterator iter);
+
+  // Rebalances or splits the node iter points to.
+  void rebalance_or_split(iterator *iter);
+
+  // Merges the values of left, right and the delimiting key on their parent
+  // onto left, removing the delimiting key and deleting right.
+  void merge_nodes(node_type *left, node_type *right);
+
+  // Tries to merge node with its left or right sibling, and failing that,
+  // rebalance with its left or right sibling. Returns true if a merge
+  // occurred, at which point it is no longer valid to access node. Returns
+  // false if no merging took place.
+  bool try_merge_or_rebalance(iterator *iter);
+
+  // Tries to shrink the height of the tree by 1.
+  void try_shrink();
+
+  iterator internal_end(iterator iter) {
+    return iter.node_ != nullptr ? iter : end();
+  }
+  const_iterator internal_end(const_iterator iter) const {
+    return iter.node_ != nullptr ? iter : end();
+  }
+
+  // Emplaces a value into the btree immediately before iter. Requires that
+  // key(v) <= iter.key() and (--iter).key() <= key(v).
+  template <typename... Args>
+  iterator internal_emplace(iterator iter, Args &&...args);
+
+  // Returns an iterator pointing to the first value >= the value "iter" is
+  // pointing at. Note that "iter" might be pointing to an invalid location
+  // such as iter.position_ == iter.node_->finish(). This routine simply moves
+  // iter up in the tree to a valid location. Requires: iter.node_ is non-null.
+  template <typename IterType>
+  static IterType internal_last(IterType iter);
+
+  // Returns an iterator pointing to the leaf position at which key would
+  // reside in the tree, unless there is an exact match - in which case, the
+  // result may not be on a leaf. When there's a three-way comparator, we can
+  // return whether there was an exact match. This allows the caller to avoid a
+  // subsequent comparison to determine if an exact match was made, which is
+  // important for keys with expensive comparison, such as strings.
+  template <typename K>
+  SearchResult<iterator, is_key_compare_to::value> internal_locate(
+      const K &key) const;
+
+  // Internal routine which implements lower_bound().
+  template <typename K>
+  SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+      const K &key) const;
+
+  // Internal routine which implements upper_bound().
+  template <typename K>
+  iterator internal_upper_bound(const K &key) const;
+
+  // Internal routine which implements find().
+ template + iterator internal_find(const K &key) const; + + // Verifies the tree structure of node. + size_type internal_verify(const node_type *node, const key_type *lo, + const key_type *hi) const; + + node_stats internal_stats(const node_type *node) const { + // The root can be a static empty node. + if (node == nullptr || (node == root() && empty())) { + return node_stats(0, 0); + } + if (node->is_leaf()) { + return node_stats(1, 0); + } + node_stats res(0, 1); + for (int i = node->start(); i <= node->finish(); ++i) { + res += internal_stats(node->child(i)); + } + return res; + } + + node_type *root_; + + // A pointer to the rightmost node. Note that the leftmost node is stored as + // the root's parent. We use compressed tuple in order to save space because + // key_compare and allocator_type are usually empty. + absl::container_internal::CompressedTuple + rightmost_; + + // Number of values. + size_type size_; +}; + +//// +// btree_node methods +template +template +inline void btree_node
<P>
::emplace_value(const field_type i, + allocator_type *alloc, + Args &&...args) { + assert(i >= start()); + assert(i <= finish()); + // Shift old values to create space for new value and then construct it in + // place. + if (i < finish()) { + transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this, + alloc); + } + value_init(static_cast(i), alloc, std::forward(args)...); + set_finish(finish() + 1); + + if (is_internal() && finish() > i + 1) { + for (field_type j = finish(); j > i + 1; --j) { + set_child(j, child(j - 1)); + } + clear_child(i + 1); + } +} + +template +inline void btree_node
<P>
::remove_values(const field_type i, + const field_type to_erase, + allocator_type *alloc) { + // Transfer values after the removed range into their new places. + value_destroy_n(i, to_erase, alloc); + const field_type orig_finish = finish(); + const field_type src_i = i + to_erase; + transfer_n(orig_finish - src_i, i, src_i, this, alloc); + + if (is_internal()) { + // Delete all children between begin and end. + for (field_type j = 0; j < to_erase; ++j) { + clear_and_delete(child(i + j + 1), alloc); + } + // Rotate children after end into new positions. + for (field_type j = i + to_erase + 1; j <= orig_finish; ++j) { + set_child(j - to_erase, child(j)); + clear_child(j); + } + } + set_finish(orig_finish - to_erase); +} + +template +void btree_node
<P>
::rebalance_right_to_left(field_type to_move, + btree_node *right, + allocator_type *alloc) { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(right->count() >= count()); + assert(to_move >= 1); + assert(to_move <= right->count()); + + // 1) Move the delimiting value in the parent to the left node. + transfer(finish(), position(), parent(), alloc); + + // 2) Move the (to_move - 1) values from the right node to the left node. + transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc); + + // 3) Move the new delimiting value to the parent from the right node. + parent()->transfer(position(), right->start() + to_move - 1, right, alloc); + + // 4) Shift the values in the right node to their correct positions. + right->transfer_n(right->count() - to_move, right->start(), + right->start() + to_move, right, alloc); + + if (is_internal()) { + // Move the child pointers from the right to the left node. + for (field_type i = 0; i < to_move; ++i) { + init_child(finish() + i + 1, right->child(i)); + } + for (field_type i = right->start(); i <= right->finish() - to_move; ++i) { + assert(i + to_move <= right->max_count()); + right->init_child(i, right->child(i + to_move)); + right->clear_child(i + to_move); + } + } + + // Fixup `finish` on the left and right nodes. + set_finish(finish() + to_move); + right->set_finish(right->finish() - to_move); +} + +template +void btree_node
<P>
::rebalance_left_to_right(field_type to_move, + btree_node *right, + allocator_type *alloc) { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(count() >= right->count()); + assert(to_move >= 1); + assert(to_move <= count()); + + // Values in the right node are shifted to the right to make room for the + // new to_move values. Then, the delimiting value in the parent and the + // other (to_move - 1) values in the left node are moved into the right node. + // Lastly, a new delimiting value is moved from the left node into the + // parent, and the remaining empty left node entries are destroyed. + + // 1) Shift existing values in the right node to their correct positions. + right->transfer_n_backward(right->count(), right->start() + to_move, + right->start(), right, alloc); + + // 2) Move the delimiting value in the parent to the right node. + right->transfer(right->start() + to_move - 1, position(), parent(), alloc); + + // 3) Move the (to_move - 1) values from the left node to the right node. + right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this, + alloc); + + // 4) Move the new delimiting value to the parent from the left node. + parent()->transfer(position(), finish() - to_move, this, alloc); + + if (is_internal()) { + // Move the child pointers from the left to the right node. + for (field_type i = right->finish() + 1; i > right->start(); --i) { + right->init_child(i - 1 + to_move, right->child(i - 1)); + right->clear_child(i - 1); + } + for (field_type i = 1; i <= to_move; ++i) { + right->init_child(i - 1, child(finish() - to_move + i)); + clear_child(finish() - to_move + i); + } + } + + // Fixup the counts on the left and right nodes. + set_finish(finish() - to_move); + right->set_finish(right->finish() + to_move); +} + +template +void btree_node
<P>
::split(const int insert_position, btree_node *dest, + allocator_type *alloc) { + assert(dest->count() == 0); + assert(max_count() == kNodeSlots); + assert(position() + 1 == dest->position()); + assert(parent() == dest->parent()); + + // We bias the split based on the position being inserted. If we're + // inserting at the beginning of the left node then bias the split to put + // more values on the right node. If we're inserting at the end of the + // right node then bias the split to put more values on the left node. + if (insert_position == start()) { + dest->set_finish(dest->start() + finish() - 1); + } else if (insert_position == kNodeSlots) { + dest->set_finish(dest->start()); + } else { + dest->set_finish(dest->start() + count() / 2); + } + set_finish(finish() - dest->count()); + assert(count() >= 1); + + // Move values from the left sibling to the right sibling. + dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc); + + // The split key is the largest value in the left sibling. + --mutable_finish(); + parent()->emplace_value(position(), alloc, finish_slot()); + value_destroy(finish(), alloc); + parent()->set_child_noupdate_position(position() + 1, dest); + + if (is_internal()) { + for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish(); + ++i, ++j) { + assert(child(j) != nullptr); + dest->init_child(i, child(j)); + clear_child(j); + } + } +} + +template +void btree_node
<P>
::merge(btree_node *src, allocator_type *alloc) { + assert(parent() == src->parent()); + assert(position() + 1 == src->position()); + + // Move the delimiting value to the left node. + value_init(finish(), alloc, parent()->slot(position())); + + // Move the values from the right to the left node. + transfer_n(src->count(), finish() + 1, src->start(), src, alloc); + + if (is_internal()) { + // Move the child pointers from the right to the left node. + for (field_type i = src->start(), j = finish() + 1; i <= src->finish(); + ++i, ++j) { + init_child(j, src->child(i)); + src->clear_child(i); + } + } + + // Fixup `finish` on the src and dest nodes. + set_finish(start() + 1 + count() + src->count()); + src->set_finish(src->start()); + + // Remove the value on the parent node and delete the src node. + parent()->remove_values(position(), /*to_erase=*/1, alloc); +} + +template +void btree_node
<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { + if (node->is_leaf()) { + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(LeafSize(node->max_count()), node, alloc); + return; + } + if (node->count() == 0) { + deallocate(InternalSize(), node, alloc); + return; + } + + // The parent of the root of the subtree we are deleting. + btree_node *delete_root_parent = node->parent(); + + // Navigate to the leftmost leaf under node, and then delete upwards. + while (node->is_internal()) node = node->start_child(); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // When generations are enabled, we delete the leftmost leaf last in case it's + // the parent of the root and we need to check whether it's a leaf before we + // can update the root's generation. + // TODO(ezb): if we change btree_node::is_root to check a bool inside the node + // instead of checking whether the parent is a leaf, we can remove this logic. + btree_node *leftmost_leaf = node; +#endif + // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`, + // which isn't guaranteed to be a valid `field_type`. + size_type pos = node->position(); + btree_node *parent = node->parent(); + for (;;) { + // In each iteration of the next loop, we delete one leaf node and go right. + assert(pos <= parent->finish()); + do { + node = parent->child(static_cast(pos)); + if (node->is_internal()) { + // Navigate to the leftmost leaf under node. + while (node->is_internal()) node = node->start_child(); + pos = node->position(); + parent = node->parent(); + } + node->value_destroy_n(node->start(), node->count(), alloc); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (leftmost_leaf != node) +#endif + deallocate(LeafSize(node->max_count()), node, alloc); + ++pos; + } while (pos <= parent->finish()); + + // Once we've deleted all children of parent, delete parent and go up/right. + assert(pos > parent->finish()); + do { + node = parent; + pos = node->position(); + parent = node->parent(); + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(InternalSize(), node, alloc); + if (parent == delete_root_parent) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); +#endif + return; + } + ++pos; + } while (pos > parent->finish()); + } +} + +//// +// btree_iterator methods + +// Note: the implementation here is based on btree_node::clear_and_delete. +template +auto btree_iterator::distance_slow(const_iterator other) const + -> difference_type { + const_iterator begin = other; + const_iterator end = *this; + assert(begin.node_ != end.node_ || !begin.node_->is_leaf() || + begin.position_ != end.position_); + + const node_type *node = begin.node_; + // We need to compensate for double counting if begin.node_ is a leaf node. + difference_type count = node->is_leaf() ? -begin.position_ : 0; + + // First navigate to the leftmost leaf node past begin. + if (node->is_internal()) { + ++count; + node = node->child(begin.position_ + 1); + } + while (node->is_internal()) node = node->start_child(); + + // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`, + // which isn't guaranteed to be a valid `field_type`. + size_type pos = node->position(); + const node_type *parent = node->parent(); + for (;;) { + // In each iteration of the next loop, we count one leaf node and go right. + assert(pos <= parent->finish()); + do { + node = parent->child(static_cast(pos)); + if (node->is_internal()) { + // Navigate to the leftmost leaf under node. 
+ while (node->is_internal()) node = node->start_child(); + pos = node->position(); + parent = node->parent(); + } + if (node == end.node_) return count + end.position_; + if (parent == end.node_ && pos == static_cast(end.position_)) + return count + node->count(); + // +1 is for the next internal node value. + count += node->count() + 1; + ++pos; + } while (pos <= parent->finish()); + + // Once we've counted all children of parent, go up/right. + assert(pos > parent->finish()); + do { + node = parent; + pos = node->position(); + parent = node->parent(); + // -1 because we counted the value at end and shouldn't. + if (parent == end.node_ && pos == static_cast(end.position_)) + return count - 1; + ++pos; + } while (pos > parent->finish()); + } +} + +template +void btree_iterator::increment_slow() { + if (node_->is_leaf()) { + assert(position_ >= node_->finish()); + btree_iterator save(*this); + while (position_ == node_->finish() && !node_->is_root()) { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position(); + node_ = node_->parent(); + } + // TODO(ezb): assert we aren't incrementing end() instead of handling. + if (position_ == node_->finish()) { + *this = save; + } + } else { + assert(position_ < node_->finish()); + node_ = node_->child(static_cast(position_ + 1)); + while (node_->is_internal()) { + node_ = node_->start_child(); + } + position_ = node_->start(); + } +} + +template +void btree_iterator::decrement_slow() { + if (node_->is_leaf()) { + assert(position_ <= -1); + btree_iterator save(*this); + while (position_ < node_->start() && !node_->is_root()) { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position() - 1; + node_ = node_->parent(); + } + // TODO(ezb): assert we aren't decrementing begin() instead of handling. + if (position_ < node_->start()) { + *this = save; + } + } else { + assert(position_ >= node_->start()); + node_ = node_->child(static_cast(position_)); + while (node_->is_internal()) { + node_ = node_->child(node_->finish()); + } + position_ = node_->finish() - 1; + } +} + +//// +// btree methods +template +template +void btree
<P>
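+// (Editorial sketch; not part of the upstream Abseil source.) Because `other`
+// is traversed in key order, each value can be emplaced at end() with no key
+// comparisons. At the public-API level this is what makes copying a tree
+// cheaper than inserting the same values one by one:
+//
+//   absl::btree_set<int> a = {3, 1, 2};
+//   absl::btree_set<int> b = a;  // bulk-loads a's values in sorted order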
::copy_or_move_values_in_order(Btree &other) {
+  static_assert(std::is_same<btree, Btree>::value ||
+                    std::is_same<const btree, Btree>::value,
+                "Btree type must be same or const.");
+  assert(empty());
+
+  // We can avoid key comparisons because we know the order of the
+  // values is the same order we'll store them in.
+  auto iter = other.begin();
+  if (iter == other.end()) return;
+  insert_multi(iter.slot());
+  ++iter;
+  for (; iter != other.end(); ++iter) {
+    // If the btree is not empty, we can just insert the new value at the end
+    // of the tree.
+    internal_emplace(end(), iter.slot());
+  }
+}
+
+template <typename P>
+constexpr bool btree
<P>
::static_assert_validation() { + static_assert(std::is_nothrow_copy_constructible::value, + "Key comparison must be nothrow copy constructible"); + static_assert(std::is_nothrow_copy_constructible::value, + "Allocator must be nothrow copy constructible"); + static_assert(std::is_trivially_copyable::value, + "iterator not trivially copyable."); + + // Note: We assert that kTargetValues, which is computed from + // Params::kTargetNodeSize, must fit the node_type::field_type. + static_assert( + kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), + "target node size too large"); + + // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. + static_assert( + compare_has_valid_result_type(), + "key comparison function must return absl::{weak,strong}_ordering or " + "bool."); + + // Test the assumption made in setting kNodeSlotSpace. + static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, + "node space assumption incorrect"); + + return true; +} + +template +template +auto btree
<P>
::lower_bound_equal(const K &key) const
+    -> std::pair<iterator, bool> {
+  const SearchResult<iterator, is_key_compare_to::value> res =
+      internal_lower_bound(key);
+  const iterator lower = iterator(internal_end(res.value));
+  const bool equal = res.HasMatch()
+                         ? res.IsEq()
+                         : lower != end() && !compare_keys(key, lower.key());
+  return {lower, equal};
+}
+
+template <typename P>
+template <typename K>
+auto btree
<P>
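+// (Editorial sketch; not part of the upstream Abseil source.) equal_range()
+// below avoids calling upper_bound() when at most one equivalent key can
+// exist; for multi-containers the range can be wider:
+//
+//   absl::btree_multiset<int> s = {1, 2, 2, 2, 3};
+//   auto [first, last] = s.equal_range(2);
+//   assert(std::distance(first, last) == 3);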
::equal_range(const K &key) -> std::pair<iterator, iterator> {
+  const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+  const iterator lower = lower_and_equal.first;
+  if (!lower_and_equal.second) {
+    return {lower, lower};
+  }
+
+  const iterator next = std::next(lower);
+  if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+    // The next iterator after lower must point to a key greater than `key`.
+    // Note: if this assert fails, then it may indicate that the comparator
+    // does not meet the equivalence requirements for Compare
+    // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+    assert(next == end() || compare_keys(key, next.key()));
+    return {lower, next};
+  }
+  // Try once more to avoid the call to upper_bound() if there's only one
+  // equivalent key. This should prevent all calls to upper_bound() in cases of
+  // unique-containers with heterogeneous comparators in which all comparison
+  // operators have the same equivalence classes.
+  if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+  // In this case, we need to call upper_bound() to avoid worst case O(N)
+  // behavior if we were to iterate over equal keys.
+  return {lower, upper_bound(key)};
+}
+
+template <typename P>
+template <typename K, typename... Args>
+auto btree
<P>
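+// (Editorial sketch; not part of the upstream Abseil source.) insert_unique()
+// backs insert()/emplace() on the unique containers; the bool in the returned
+// pair reports whether an insertion took place:
+//
+//   absl::btree_set<std::string> s;
+//   auto [it1, inserted1] = s.insert("a");  // inserted1 == true
+//   auto [it2, inserted2] = s.insert("a");  // inserted2 == false, it2 == it1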
::insert_unique(const K &key, Args &&...args)
+    -> std::pair<iterator, bool> {
+  if (empty()) {
+    mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
+  }
+
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  iterator iter = res.value;
+
+  if (res.HasMatch()) {
+    if (res.IsEq()) {
+      // The key already exists in the tree, do nothing.
+      return {iter, false};
+    }
+  } else {
+    iterator last = internal_last(iter);
+    if (last.node_ && !compare_keys(key, last.key())) {
+      // The key already exists in the tree, do nothing.
+      return {last, false};
+    }
+  }
+  return {internal_emplace(iter, std::forward<Args>(args)...), true};
+}
+
+template <typename P>
+template <typename K, typename... Args>
+inline auto btree
<P>
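+// (Editorial sketch; not part of the upstream Abseil source.) The hinted
+// overload below checks the neighbors of `position` before falling back to a
+// full tree descent, so inserting pre-sorted data with end() as the hint
+// stays cheap:
+//
+//   absl::btree_set<int> s;
+//   for (int i = 0; i < 1000; ++i) s.insert(s.end(), i);  // hint always valid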
::insert_hint_unique(iterator position, const K &key,
+                                        Args &&...args)
+    -> std::pair<iterator, bool> {
+  if (!empty()) {
+    if (position == end() || compare_keys(key, position.key())) {
+      if (position == begin() || compare_keys(std::prev(position).key(), key)) {
+        // prev.key() < key < position.key()
+        return {internal_emplace(position, std::forward<Args>(args)...), true};
+      }
+    } else if (compare_keys(position.key(), key)) {
+      ++position;
+      if (position == end() || compare_keys(key, position.key())) {
+        // {original `position`}.key() < key < {current `position`}.key()
+        return {internal_emplace(position, std::forward<Args>(args)...), true};
+      }
+    } else {
+      // position.key() == key
+      return {position, false};
+    }
+  }
+  return insert_unique(key, std::forward<Args>(args)...);
+}
+
+template <typename P>
+template <typename InputIterator, typename>
+void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e, int) {
+  for (; b != e; ++b) {
+    insert_hint_unique(end(), params_type::key(*b), *b);
+  }
+}
+
+template <typename P>
+template <typename InputIterator>
+void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+  for (; b != e; ++b) {
+    // Use a node handle to manage a temp slot.
+    auto node_handle =
+        CommonAccess::Construct<node_handle_type>(get_allocator(), *b);
+    slot_type *slot = CommonAccess::GetSlot(node_handle);
+    insert_hint_unique(end(), params_type::key(slot), slot);
+  }
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree
<P>
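+// (Editorial sketch; not part of the upstream Abseil source.) insert_multi()
+// uses upper_bound, so equal keys keep their insertion order:
+//
+//   absl::btree_multimap<int, char> m;
+//   m.insert({1, 'a'});
+//   m.insert({1, 'b'});  // placed after {1, 'a'}, never before it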
::insert_multi(const key_type &key, ValueType &&v) -> iterator {
+  if (empty()) {
+    mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
+  }
+
+  iterator iter = internal_upper_bound(key);
+  if (iter.node_ == nullptr) {
+    iter = end();
+  }
+  return internal_emplace(iter, std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree
<P>
::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
+  if (!empty()) {
+    const key_type &key = params_type::key(v);
+    if (position == end() || !compare_keys(position.key(), key)) {
+      if (position == begin() ||
+          !compare_keys(key, std::prev(position).key())) {
+        // prev.key() <= key <= position.key()
+        return internal_emplace(position, std::forward<ValueType>(v));
+      }
+    } else {
+      ++position;
+      if (position == end() || !compare_keys(position.key(), key)) {
+        // {original `position`}.key() < key < {current `position`}.key()
+        return internal_emplace(position, std::forward<ValueType>(v));
+      }
+    }
+  }
+  return insert_multi(std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename InputIterator>
+void btree
<P>
::insert_iterator_multi(InputIterator b, InputIterator e) {
+  for (; b != e; ++b) {
+    insert_hint_multi(end(), *b);
+  }
+}
+
+template <typename P>
+auto btree
<P>
::operator=(const btree &other) -> btree & {
+  if (this != &other) {
+    clear();
+
+    *mutable_key_comp() = other.key_comp();
+    if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_copy_assignment::value) {
+      *mutable_allocator() = other.allocator();
+    }
+
+    copy_or_move_values_in_order(other);
+  }
+  return *this;
+}
+
+template <typename P>
+auto btree
<P>
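+// (Editorial sketch; not part of the upstream Abseil source.) Move assignment
+// below has three paths: swap everything when the allocator propagates
+// (POCMA), swap node pointers when the allocators compare equal, and
+// element-wise move otherwise. With std::allocator the first path applies and
+// the move is O(1):
+//
+//   absl::btree_set<int> a = {1, 2, 3};
+//   absl::btree_set<int> b;
+//   b = std::move(a);  // steals a's nodes; no per-element work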
::operator=(btree &&other) noexcept -> btree & { + if (this != &other) { + clear(); + + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_move_assignment::value) { + swap(root_, other.root_); + // Note: `rightmost_` also contains the allocator and the key comparator. + swap(rightmost_, other.rightmost_); + swap(size_, other.size_); + } else { + if (allocator() == other.allocator()) { + swap(mutable_root(), other.mutable_root()); + swap(*mutable_key_comp(), *other.mutable_key_comp()); + swap(mutable_rightmost(), other.mutable_rightmost()); + swap(size_, other.size_); + } else { + // We aren't allowed to propagate the allocator and the allocator is + // different so we can't take over its memory. We must move each element + // individually. We need both `other` and `this` to have `other`s key + // comparator while moving the values so we can't swap the key + // comparators. + *mutable_key_comp() = other.key_comp(); + copy_or_move_values_in_order(other); + } + } + } + return *this; +} + +template +auto btree
<P>
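+// (Editorial sketch; not part of the upstream Abseil source.) erase() returns
+// the iterator after the erased element, so the standard erase-while-iterating
+// pattern applies:
+//
+//   absl::btree_set<int> s = {1, 2, 3, 4};
+//   for (auto it = s.begin(); it != s.end();) {
+//     if (*it % 2 == 0) {
+//       it = s.erase(it);  // may trigger rebalancing, which moves values
+//     } else {
+//       ++it;
+//     }
+//   }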
::erase(iterator iter) -> iterator { + iter.node_->value_destroy(static_cast(iter.position_), + mutable_allocator()); + iter.update_generation(); + + const bool internal_delete = iter.node_->is_internal(); + if (internal_delete) { + // Deletion of a value on an internal node. First, transfer the largest + // value from our left child here, then erase/rebalance from that position. + // We can get to the largest value from our left child by decrementing iter. + iterator internal_iter(iter); + --iter; + assert(iter.node_->is_leaf()); + internal_iter.node_->transfer( + static_cast(internal_iter.position_), + static_cast(iter.position_), iter.node_, + mutable_allocator()); + } else { + // Shift values after erased position in leaf. In the internal case, we + // don't need to do this because the leaf position is the end of the node. + const field_type transfer_from = + static_cast(iter.position_ + 1); + const field_type num_to_transfer = iter.node_->finish() - transfer_from; + iter.node_->transfer_n(num_to_transfer, + static_cast(iter.position_), + transfer_from, iter.node_, mutable_allocator()); + } + // Update node finish and container size. + iter.node_->set_finish(iter.node_->finish() - 1); + --size_; + + // We want to return the next value after the one we just erased. If we + // erased from an internal node (internal_delete == true), then the next + // value is ++(++iter). If we erased from a leaf node (internal_delete == + // false) then the next value is ++iter. Note that ++iter may point to an + // internal node and the value in the internal node may move to a leaf node + // (iter.node_) when rebalancing is performed at the leaf level. + + iterator res = rebalance_after_delete(iter); + + // If we erased from an internal node, advance the iterator. + if (internal_delete) { + ++res; + } + return res; +} + +template +auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { + // Merge/rebalance as we walk back up the tree. + iterator res(iter); + bool first_iteration = true; + for (;;) { + if (iter.node_ == root()) { + try_shrink(); + if (empty()) { + return end(); + } + break; + } + if (iter.node_->count() >= kMinNodeValues) { + break; + } + bool merged = try_merge_or_rebalance(&iter); + // On the first iteration, we should update `res` with `iter` because `res` + // may have been invalidated. + if (first_iteration) { + res = iter; + first_iteration = false; + } + if (!merged) { + break; + } + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + } + res.update_generation(); + + // Adjust our return value. If we're pointing at the end of a node, advance + // the iterator. + if (res.position_ == res.node_->finish()) { + res.position_ = res.node_->finish() - 1; + ++res; + } + + return res; +} + +// Note: we tried implementing this more efficiently by erasing all of the +// elements in [begin, end) at once and then doing rebalancing once at the end +// (rather than interleaving deletion and rebalancing), but that adds a lot of +// complexity, which seems to outweigh the performance win. +template +auto btree
<P>
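+// (Editorial sketch; not part of the upstream Abseil source.) erase_range()
+// implements erase(first, last) on the containers. On leaves it removes runs
+// of values per node instead of erasing element by element:
+//
+//   absl::btree_set<int> s;
+//   for (int i = 0; i < 100; ++i) s.insert(i);
+//   s.erase(s.find(10), s.find(90));  // removes [10, 90) in node-sized chunks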
::erase_range(iterator begin, iterator end) + -> std::pair { + size_type count = static_cast(end - begin); + assert(count >= 0); + + if (count == 0) { + return {0, begin}; + } + + if (static_cast(count) == size_) { + clear(); + return {count, this->end()}; + } + + if (begin.node_ == end.node_) { + assert(end.position_ > begin.position_); + begin.node_->remove_values( + static_cast(begin.position_), + static_cast(end.position_ - begin.position_), + mutable_allocator()); + size_ -= count; + return {count, rebalance_after_delete(begin)}; + } + + const size_type target_size = size_ - count; + while (size_ > target_size) { + if (begin.node_->is_leaf()) { + const size_type remaining_to_erase = size_ - target_size; + const size_type remaining_in_node = + static_cast(begin.node_->finish() - begin.position_); + const field_type to_erase = static_cast( + (std::min)(remaining_to_erase, remaining_in_node)); + begin.node_->remove_values(static_cast(begin.position_), + to_erase, mutable_allocator()); + size_ -= to_erase; + begin = rebalance_after_delete(begin); + } else { + begin = erase(begin); + } + } + begin.update_generation(); + return {count, begin}; +} + +template +void btree
<P>
::clear() {
+  if (!empty()) {
+    node_type::clear_and_delete(root(), mutable_allocator());
+  }
+  mutable_root() = mutable_rightmost() = EmptyNode();
+  size_ = 0;
+}
+
+template <typename P>
+void btree
<P>
::swap(btree &other) {
+  using std::swap;
+  if (absl::allocator_traits<
+          allocator_type>::propagate_on_container_swap::value) {
+    // Note: `rightmost_` also contains the allocator and the key comparator.
+    swap(rightmost_, other.rightmost_);
+  } else {
+    // It's undefined behavior if the allocators are unequal here.
+    assert(allocator() == other.allocator());
+    swap(mutable_rightmost(), other.mutable_rightmost());
+    swap(*mutable_key_comp(), *other.mutable_key_comp());
+  }
+  swap(mutable_root(), other.mutable_root());
+  swap(size_, other.size_);
+}
+
+template <typename P>
+void btree
<P>
::verify() const {
+  assert(root() != nullptr);
+  assert(leftmost() != nullptr);
+  assert(rightmost() != nullptr);
+  assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
+  assert(leftmost() == (++const_iterator(root(), -1)).node_);
+  assert(rightmost() == (--const_iterator(root(), root()->finish())).node_);
+  assert(leftmost()->is_leaf());
+  assert(rightmost()->is_leaf());
+}
+
+template <typename P>
+void btree
<P>
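+// (Editorial sketch; not part of the upstream Abseil source.)
+// rebalance_or_split() first tries to shift values into a non-full sibling
+// and only splits when that fails. The split point is biased by
+// `insert_position`: inserting at the left edge puts more values in the new
+// right node, and inserting at the right edge keeps the left node full, so
+// sequential inserts produce nearly-full nodes rather than half-full ones.
+//
+//   // E.g. for a full node with insert_position == kNodeSlots, split() above
+//   // moves zero values to the new node: dest->set_finish(dest->start()).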
::rebalance_or_split(iterator *iter) { + node_type *&node = iter->node_; + int &insert_position = iter->position_; + assert(node->count() == node->max_count()); + assert(kNodeSlots == node->max_count()); + + // First try to make room on the node by rebalancing. + node_type *parent = node->parent(); + if (node != root()) { + if (node->position() > parent->start()) { + // Try rebalancing with our left sibling. + node_type *left = parent->child(node->position() - 1); + assert(left->max_count() == kNodeSlots); + if (left->count() < kNodeSlots) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the end of the right node then we bias rebalancing to + // fill up the left node. + field_type to_move = + (kNodeSlots - left->count()) / + (1 + (static_cast(insert_position) < kNodeSlots)); + to_move = (std::max)(field_type{1}, to_move); + + if (static_cast(insert_position) - to_move >= + node->start() || + left->count() + to_move < kNodeSlots) { + left->rebalance_right_to_left(to_move, node, mutable_allocator()); + + assert(node->max_count() - node->count() == to_move); + insert_position = static_cast( + static_cast(insert_position) - to_move); + if (insert_position < node->start()) { + insert_position = insert_position + left->count() + 1; + node = left; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + if (node->position() < parent->finish()) { + // Try rebalancing with our right sibling. + node_type *right = parent->child(node->position() + 1); + assert(right->max_count() == kNodeSlots); + if (right->count() < kNodeSlots) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the beginning of the left node then we bias rebalancing + // to fill up the right node. + field_type to_move = (kNodeSlots - right->count()) / + (1 + (insert_position > node->start())); + to_move = (std::max)(field_type{1}, to_move); + + if (static_cast(insert_position) <= + node->finish() - to_move || + right->count() + to_move < kNodeSlots) { + node->rebalance_left_to_right(to_move, right, mutable_allocator()); + + if (insert_position > node->finish()) { + insert_position = insert_position - node->count() - 1; + node = right; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + // Rebalancing failed, make sure there is room on the parent node for a new + // value. + assert(parent->max_count() == kNodeSlots); + if (parent->count() == kNodeSlots) { + iterator parent_iter(parent, node->position()); + rebalance_or_split(&parent_iter); + parent = node->parent(); + } + } else { + // Rebalancing not possible because this is the root node. + // Create a new root node and set the current root node as the child of the + // new root. + parent = new_internal_node(/*position=*/0, parent); + parent->set_generation(root()->generation()); + parent->init_child(parent->start(), node); + mutable_root() = parent; + // If the former root was a leaf node, then it's now the rightmost node. + assert(parent->start_child()->is_internal() || + parent->start_child() == rightmost()); + } + + // Split the node. 
+ node_type *split_node; + if (node->is_leaf()) { + split_node = new_leaf_node(node->position() + 1, parent); + node->split(insert_position, split_node, mutable_allocator()); + if (rightmost() == node) mutable_rightmost() = split_node; + } else { + split_node = new_internal_node(node->position() + 1, parent); + node->split(insert_position, split_node, mutable_allocator()); + } + + if (insert_position > node->finish()) { + insert_position = insert_position - node->count() - 1; + node = split_node; + } +} + +template +void btree
<P>
::merge_nodes(node_type *left, node_type *right) {
+  left->merge(right, mutable_allocator());
+  if (rightmost() == right) mutable_rightmost() = left;
+}
+
+template <typename P>
+bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { + node_type *parent = iter->node_->parent(); + if (iter->node_->position() > parent->start()) { + // Try merging with our left sibling. + node_type *left = parent->child(iter->node_->position() - 1); + assert(left->max_count() == kNodeSlots); + if (1U + left->count() + iter->node_->count() <= kNodeSlots) { + iter->position_ += 1 + left->count(); + merge_nodes(left, iter->node_); + iter->node_ = left; + return true; + } + } + if (iter->node_->position() < parent->finish()) { + // Try merging with our right sibling. + node_type *right = parent->child(iter->node_->position() + 1); + assert(right->max_count() == kNodeSlots); + if (1U + iter->node_->count() + right->count() <= kNodeSlots) { + merge_nodes(iter->node_, right); + return true; + } + // Try rebalancing with our right sibling. We don't perform rebalancing if + // we deleted the first element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the front of the tree. + if (right->count() > kMinNodeValues && + (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) { + field_type to_move = (right->count() - iter->node_->count()) / 2; + to_move = + (std::min)(to_move, static_cast(right->count() - 1)); + iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); + return false; + } + } + if (iter->node_->position() > parent->start()) { + // Try rebalancing with our left sibling. We don't perform rebalancing if + // we deleted the last element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the back of the tree. + node_type *left = parent->child(iter->node_->position() - 1); + if (left->count() > kMinNodeValues && + (iter->node_->count() == 0 || + iter->position_ < iter->node_->finish())) { + field_type to_move = (left->count() - iter->node_->count()) / 2; + to_move = (std::min)(to_move, static_cast(left->count() - 1)); + left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); + iter->position_ += to_move; + return false; + } + } + return false; +} + +template +void btree
<P>
::try_shrink() {
+  node_type *orig_root = root();
+  if (orig_root->count() > 0) {
+    return;
+  }
+  // Deleted the last item on the root node, shrink the height of the tree.
+  if (orig_root->is_leaf()) {
+    assert(size() == 0);
+    mutable_root() = mutable_rightmost() = EmptyNode();
+  } else {
+    node_type *child = orig_root->start_child();
+    child->make_root();
+    mutable_root() = child;
+  }
+  node_type::clear_and_delete(orig_root, mutable_allocator());
+}
+
+template <typename P>
+template <typename IterType>
+inline IterType btree
<P>
::internal_last(IterType iter) {
+  assert(iter.node_ != nullptr);
+  while (iter.position_ == iter.node_->finish()) {
+    iter.position_ = iter.node_->position();
+    iter.node_ = iter.node_->parent();
+    if (iter.node_->is_leaf()) {
+      iter.node_ = nullptr;
+      break;
+    }
+  }
+  iter.update_generation();
+  return iter;
+}
+
+template <typename P>
+template <typename... Args>
+inline auto btree
<P>
::internal_emplace(iterator iter, Args &&...args) + -> iterator { + if (iter.node_->is_internal()) { + // We can't insert on an internal node. Instead, we'll insert after the + // previous value which is guaranteed to be on a leaf node. + --iter; + ++iter.position_; + } + const field_type max_count = iter.node_->max_count(); + allocator_type *alloc = mutable_allocator(); + + const auto transfer_and_delete = [&](node_type *old_node, + node_type *new_node) { + new_node->transfer_n(old_node->count(), new_node->start(), + old_node->start(), old_node, alloc); + new_node->set_finish(old_node->finish()); + old_node->set_finish(old_node->start()); + new_node->set_generation(old_node->generation()); + node_type::clear_and_delete(old_node, alloc); + }; + const auto replace_leaf_root_node = [&](field_type new_node_size) { + assert(iter.node_ == root()); + node_type *old_root = iter.node_; + node_type *new_root = iter.node_ = new_leaf_root_node(new_node_size); + transfer_and_delete(old_root, new_root); + mutable_root() = mutable_rightmost() = new_root; + }; + + bool replaced_node = false; + if (iter.node_->count() == max_count) { + // Make room in the leaf for the new item. + if (max_count < kNodeSlots) { + // Insertion into the root where the root is smaller than the full node + // size. Simply grow the size of the root node. + replace_leaf_root_node(static_cast( + (std::min)(static_cast(kNodeSlots), 2 * max_count))); + replaced_node = true; + } else { + rebalance_or_split(&iter); + } + } + (void)replaced_node; +#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_HWADDRESS_SANITIZER) + if (!replaced_node) { + assert(iter.node_->is_leaf()); + if (iter.node_->is_root()) { + replace_leaf_root_node(max_count); + } else { + node_type *old_node = iter.node_; + const bool was_rightmost = rightmost() == old_node; + const bool was_leftmost = leftmost() == old_node; + node_type *parent = old_node->parent(); + const field_type position = old_node->position(); + node_type *new_node = iter.node_ = new_leaf_node(position, parent); + parent->set_child_noupdate_position(position, new_node); + transfer_and_delete(old_node, new_node); + if (was_rightmost) mutable_rightmost() = new_node; + // The leftmost node is stored as the parent of the root node. + if (was_leftmost) root()->set_parent(new_node); + } + } +#endif + iter.node_->emplace_value(static_cast(iter.position_), alloc, + std::forward(args)...); + assert( + iter.node_->is_ordered_correctly(static_cast(iter.position_), + original_key_compare(key_comp())) && + "If this assert fails, then either (1) the comparator may violate " + "transitivity, i.e. comp(a,b) && comp(b,c) -> comp(a,c) (see " + "https://en.cppreference.com/w/cpp/named_req/Compare), or (2) a " + "key may have been mutated after it was inserted into the tree."); + ++size_; + iter.update_generation(); + return iter; +} + +template +template +inline auto btree
<P>
::internal_locate(const K &key) const + -> SearchResult { + iterator iter(const_cast(root())); + for (;;) { + SearchResult res = + iter.node_->lower_bound(key, key_comp()); + iter.position_ = static_cast(res.value); + if (res.IsEq()) { + return {iter, MatchKind::kEq}; + } + // Note: in the non-key-compare-to case, we don't need to walk all the way + // down the tree if the keys are equal, but determining equality would + // require doing an extra comparison on each node on the way down, and we + // will need to go all the way to the leaf node in the expected case. + if (iter.node_->is_leaf()) { + break; + } + iter.node_ = iter.node_->child(static_cast(iter.position_)); + } + // Note: in the non-key-compare-to case, the key may actually be equivalent + // here (and the MatchKind::kNe is ignored). + return {iter, MatchKind::kNe}; +} + +template +template +auto btree
<P>
::internal_lower_bound(const K &key) const + -> SearchResult { + if (!params_type::template can_have_multiple_equivalent_keys()) { + SearchResult ret = internal_locate(key); + ret.value = internal_last(ret.value); + return ret; + } + iterator iter(const_cast(root())); + SearchResult res; + bool seen_eq = false; + for (;;) { + res = iter.node_->lower_bound(key, key_comp()); + iter.position_ = static_cast(res.value); + if (iter.node_->is_leaf()) { + break; + } + seen_eq = seen_eq || res.IsEq(); + iter.node_ = iter.node_->child(static_cast(iter.position_)); + } + if (res.IsEq()) return {iter, MatchKind::kEq}; + return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; +} + +template +template +auto btree
<P>
::internal_upper_bound(const K &key) const -> iterator {
+  iterator iter(const_cast<node_type *>(root()));
+  for (;;) {
+    iter.position_ = static_cast<int>(iter.node_->upper_bound(key, key_comp()));
+    if (iter.node_->is_leaf()) {
+      break;
+    }
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
+  }
+  return internal_last(iter);
+}
+
+template <typename P>
+template <typename K>
+auto btree
<P>
::internal_find(const K &key) const -> iterator {
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  if (res.HasMatch()) {
+    if (res.IsEq()) {
+      return res.value;
+    }
+  } else {
+    const iterator iter = internal_last(res.value);
+    if (iter.node_ != nullptr && !compare_keys(key, iter.key())) {
+      return iter;
+    }
+  }
+  return {nullptr, 0};
+}
+
+template <typename P>
+typename btree
<P>
::size_type btree
<P>
::internal_verify( + const node_type *node, const key_type *lo, const key_type *hi) const { + assert(node->count() > 0); + assert(node->count() <= node->max_count()); + if (lo) { + assert(!compare_keys(node->key(node->start()), *lo)); + } + if (hi) { + assert(!compare_keys(*hi, node->key(node->finish() - 1))); + } + for (int i = node->start() + 1; i < node->finish(); ++i) { + assert(!compare_keys(node->key(i), node->key(i - 1))); + } + size_type count = node->count(); + if (node->is_internal()) { + for (field_type i = node->start(); i <= node->finish(); ++i) { + assert(node->child(i) != nullptr); + assert(node->child(i)->parent() == node); + assert(node->child(i)->position() == i); + count += internal_verify(node->child(i), + i == node->start() ? lo : &node->key(i - 1), + i == node->finish() ? hi : &node->key(i)); + } + } + return count; +} + +struct btree_access { + template + static auto erase_if(BtreeContainer &container, Pred pred) -> + typename BtreeContainer::size_type { + const auto initial_size = container.size(); + auto &tree = container.tree_; + auto *alloc = tree.mutable_allocator(); + for (auto it = container.begin(); it != container.end();) { + if (!pred(*it)) { + ++it; + continue; + } + auto *node = it.node_; + if (node->is_internal()) { + // Handle internal nodes normally. + it = container.erase(it); + continue; + } + // If this is a leaf node, then we do all the erases from this node + // at once before doing rebalancing. + + // The current position to transfer slots to. + int to_pos = it.position_; + node->value_destroy(it.position_, alloc); + while (++it.position_ < node->finish()) { + it.update_generation(); + if (pred(*it)) { + node->value_destroy(it.position_, alloc); + } else { + node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); + } + } + const int num_deleted = node->finish() - to_pos; + tree.size_ -= num_deleted; + node->set_finish(to_pos); + it.position_ = to_pos; + it = tree.rebalance_after_delete(it); + } + return initial_size - container.size(); + } +}; + +#undef ABSL_BTREE_ENABLE_GENERATIONS + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_ diff --git a/third_party/absl/absl/container/internal/btree_container.h b/third_party/absl/absl/container/internal/btree_container.h new file mode 100644 index 000000000..a68ce4455 --- /dev/null +++ b/third_party/absl/absl/container/internal/btree_container.h @@ -0,0 +1,763 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ +#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/common.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// A common base class for btree_set, btree_map, btree_multiset, and +// btree_multimap. +template +class btree_container { + using params_type = typename Tree::params_type; + + protected: + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = + typename KeyArg::template type< + K, typename Tree::key_type>; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using difference_type = typename Tree::difference_type; + using key_compare = typename Tree::original_key_compare; + using value_compare = typename Tree::value_compare; + using allocator_type = typename Tree::allocator_type; + using reference = typename Tree::reference; + using const_reference = typename Tree::const_reference; + using pointer = typename Tree::pointer; + using const_pointer = typename Tree::const_pointer; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using reverse_iterator = typename Tree::reverse_iterator; + using const_reverse_iterator = typename Tree::const_reverse_iterator; + using node_type = typename Tree::node_handle_type; + + struct extract_and_get_next_return_type { + node_type node; + iterator next; + }; + + // Constructors/assignments. + btree_container() : tree_(key_compare(), allocator_type()) {} + explicit btree_container(const key_compare &comp, + const allocator_type &alloc = allocator_type()) + : tree_(comp, alloc) {} + explicit btree_container(const allocator_type &alloc) + : tree_(key_compare(), alloc) {} + + btree_container(const btree_container &other) + : btree_container(other, absl::allocator_traits:: + select_on_container_copy_construction( + other.get_allocator())) {} + btree_container(const btree_container &other, const allocator_type &alloc) + : tree_(other.tree_, alloc) {} + + btree_container(btree_container &&other) noexcept( + std::is_nothrow_move_constructible::value) = default; + btree_container(btree_container &&other, const allocator_type &alloc) + : tree_(std::move(other.tree_), alloc) {} + + btree_container &operator=(const btree_container &other) = default; + btree_container &operator=(btree_container &&other) noexcept( + std::is_nothrow_move_assignable::value) = default; + + // Iterator routines. 
+ iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.begin(); } + const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.begin(); + } + const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.begin(); + } + iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.end(); } + const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.end(); + } + const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.end(); + } + reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.rbegin(); + } + const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.rbegin(); + } + const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.rbegin(); + } + reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.rend(); } + const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.rend(); + } + const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.rend(); + } + + // Lookup routines. + template + size_type count(const key_arg &key) const { + auto equal_range = this->equal_range(key); + return equal_range.second - equal_range.first; + } + template + iterator find(const key_arg &key) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.find(key); + } + template + const_iterator find(const key_arg &key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.find(key); + } + template + bool contains(const key_arg &key) const { + return find(key) != end(); + } + template + iterator lower_bound(const key_arg &key) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.lower_bound(key); + } + template + const_iterator lower_bound(const key_arg &key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.lower_bound(key); + } + template + iterator upper_bound(const key_arg &key) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.upper_bound(key); + } + template + const_iterator upper_bound(const key_arg &key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.upper_bound(key); + } + template + std::pair equal_range(const key_arg &key) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.equal_range(key); + } + template + std::pair equal_range( + const key_arg &key) const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.equal_range(key); + } + + // Deletion routines. Note that there is also a deletion routine that is + // specific to btree_set_container/btree_multiset_container. + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + iterator erase(const_iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.erase(iterator(iter)); + } + iterator erase(iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.erase(iter); + } + iterator erase(const_iterator first, + const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return tree_.erase_range(iterator(first), iterator(last)).second; + } + template + size_type erase(const key_arg &key) { + auto equal_range = this->equal_range(key); + return tree_.erase_range(equal_range.first, equal_range.second).first; + } + + // Extract routines. + extract_and_get_next_return_type extract_and_get_next(const_iterator position) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. + // Note: we rely on erase() taking place after Construct(). 
+ return {CommonAccess::Construct(get_allocator(), + iterator(position).slot()), + erase(position)}; + } + node_type extract(iterator position) { + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. + auto node = + CommonAccess::Construct(get_allocator(), position.slot()); + erase(position); + return node; + } + node_type extract(const_iterator position) { + return extract(iterator(position)); + } + + // Utility routines. + ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); } + void swap(btree_container &other) { tree_.swap(other.tree_); } + void verify() const { tree_.verify(); } + + // Size routines. + size_type size() const { return tree_.size(); } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { return tree_.empty(); } + + friend bool operator==(const btree_container &x, const btree_container &y) { + if (x.size() != y.size()) return false; + return std::equal(x.begin(), x.end(), y.begin()); + } + + friend bool operator!=(const btree_container &x, const btree_container &y) { + return !(x == y); + } + + friend bool operator<(const btree_container &x, const btree_container &y) { + return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); + } + + friend bool operator>(const btree_container &x, const btree_container &y) { + return y < x; + } + + friend bool operator<=(const btree_container &x, const btree_container &y) { + return !(y < x); + } + + friend bool operator>=(const btree_container &x, const btree_container &y) { + return !(x < y); + } + + // The allocator used by the btree. + allocator_type get_allocator() const { return tree_.get_allocator(); } + + // The key comparator used by the btree. + key_compare key_comp() const { return key_compare(tree_.key_comp()); } + value_compare value_comp() const { return tree_.value_comp(); } + + // Support absl::Hash. + template + friend State AbslHashValue(State h, const btree_container &b) { + for (const auto &v : b) { + h = State::combine(std::move(h), v); + } + return State::combine(std::move(h), b.size()); + } + + protected: + friend struct btree_access; + Tree tree_; +}; + +// A common base class for btree_set and btree_map. +template +class btree_set_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + using insert_return_type = InsertReturnType; + + // Inherit constructors. + using super_type::super_type; + btree_set_container() {} + + // Range constructors. 
+ template + btree_set_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + template + btree_set_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_set_container(b, e, key_compare(), alloc) {} + + // Initializer list constructors. + btree_set_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_set_container(init.begin(), init.end(), comp, alloc) {} + btree_set_container(std::initializer_list init, + const allocator_type &alloc) + : btree_set_container(init.begin(), init.end(), alloc) {} + + // Insertion routines. + std::pair insert(const value_type &v) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_.insert_unique(params_type::key(v), v); + } + std::pair insert(value_type &&v) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_.insert_unique(params_type::key(v), std::move(v)); + } + template + std::pair emplace(Args &&...args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + auto *slot = CommonAccess::GetSlot(node); + return this->tree_.insert_unique(params_type::key(slot), slot); + } + iterator insert(const_iterator hint, + const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), v) + .first; + } + iterator insert(const_iterator hint, + value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) + .first; + } + template + iterator emplace_hint(const_iterator hint, + Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + auto *slot = CommonAccess::GetSlot(node); + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(slot), slot) + .first; + } + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_unique(b, e, 0); + } + void insert(std::initializer_list init) { + this->tree_.insert_iterator_unique(init.begin(), init.end(), 0); + } + insert_return_type insert(node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!node) return {this->end(), false, node_type()}; + std::pair res = + this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) { + CommonAccess::Destroy(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + iterator insert(const_iterator hint, + node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!node) return this->end(); + std::pair res = this->tree_.insert_hint_unique( + iterator(hint), params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) CommonAccess::Destroy(&node); + return res.first; + } + + // Node extraction routines. + template + node_type extract(const key_arg &key) { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) + : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves elements from `src` into `this`. 
If the element already exists in + // `this`, it is left unmodified in `src`. + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + for (auto src_it = src.begin(); src_it != src.end();) { + if (insert(std::move(params_type::element(src_it.slot()))).second) { + src_it = src.erase(src_it); + } else { + ++src_it; + } + } + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } +}; + +// Base class for btree_map. +template +class btree_map_container : public btree_set_container { + using super_type = btree_set_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + private: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using mapped_type = typename params_type::mapped_type; + using value_type = typename Tree::value_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + + // Inherit constructors. + using super_type::super_type; + btree_map_container() {} + + // Insertion routines. + // Note: the nullptr template arguments and extra `const M&` overloads allow + // for supporting bitfield arguments. + template + std::pair insert_or_assign(const key_arg &k, const M &obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(k, obj); + } + template + std::pair insert_or_assign(key_arg &&k, const M &obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(std::forward(k), obj); + } + template + std::pair insert_or_assign(const key_arg &k, M &&obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(k, std::forward(obj)); + } + template + std::pair insert_or_assign(key_arg &&k, M &&obj) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(std::forward(k), std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg &k, + const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_hint_impl(hint, k, obj); + } + template + iterator insert_or_assign(const_iterator hint, key_arg &&k, + const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_hint_impl(hint, std::forward(k), obj); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg &k, + M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_hint_impl(hint, k, std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, key_arg &&k, + M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_hint_impl(hint, std::forward(k), + std::forward(obj)); + } + + template ::value, int> = 0> + std::pair try_emplace(const key_arg &k, Args &&...args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace_impl(k, std::forward(args)...); + } + template ::value, int> = 0> + std::pair try_emplace(key_arg &&k, Args &&...args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + template + iterator try_emplace(const_iterator hint, const key_arg &k, + Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace_hint_impl(hint, k, std::forward(args)...); + } + template 
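+  // (Editorial sketch; not part of the upstream Abseil source. `Widget` and
+  // its constructor arguments below are placeholders.) try_emplace constructs
+  // the mapped value only when the key is absent, whereas operator[] (defined
+  // below) default-constructs on a miss:
+  //
+  //   absl::btree_map<std::string, Widget> m;
+  //   m.try_emplace("a", arg1, arg2);  // Widget(arg1, arg2) only if "a" absent
+  //   m["b"];                          // default-constructs Widget on miss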
+ iterator try_emplace(const_iterator hint, key_arg &&k, + Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace_hint_impl(hint, std::forward(k), + std::forward(args)...); + } + + template + mapped_type &operator[](const key_arg &k) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace(k).first->second; + } + template + mapped_type &operator[](key_arg &&k) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace(std::forward(k)).first->second; + } + + template + mapped_type &at(const key_arg &key) ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + template + const mapped_type &at(const key_arg &key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + + private: + // Note: when we call `std::forward(obj)` twice, it's safe because + // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when + // `ret.second` is false. + template + std::pair insert_or_assign_impl(K &&k, M &&obj) { + const std::pair ret = + this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); + if (!ret.second) ret.first->second = std::forward(obj); + return ret; + } + template + iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) { + const std::pair ret = this->tree_.insert_hint_unique( + iterator(hint), k, std::forward(k), std::forward(obj)); + if (!ret.second) ret.first->second = std::forward(obj); + return ret.first; + } + + template + std::pair try_emplace_impl(K &&k, Args &&... args) { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + } + template + iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)) + .first; + } +}; + +// A common base class for btree_multiset and btree_multimap. +template +class btree_multiset_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + + // Inherit constructors. + using super_type::super_type; + btree_multiset_container() {} + + // Range constructors. + template + btree_multiset_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + template + btree_multiset_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_multiset_container(b, e, key_compare(), alloc) {} + + // Initializer list constructors. 
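+  // (Illustrative sketch, via the public absl::btree_multiset wrapper that
+  // sits on top of this container:
+  //
+  //   absl::btree_multiset<int> s = {1, 2, 2, 3};
+  //   assert(s.count(2) == 2);  // duplicates are kept
+  // )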
+ btree_multiset_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} + btree_multiset_container(std::initializer_list init, + const allocator_type &alloc) + : btree_multiset_container(init.begin(), init.end(), alloc) {} + + // Insertion routines. + iterator insert(const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_.insert_multi(v); + } + iterator insert(value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_.insert_multi(std::move(v)); + } + iterator insert(const_iterator hint, + const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_.insert_hint_multi(iterator(hint), v); + } + iterator insert(const_iterator hint, + value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return this->tree_.insert_hint_multi(iterator(hint), std::move(v)); + } + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_multi(b, e); + } + void insert(std::initializer_list init) { + this->tree_.insert_iterator_multi(init.begin(), init.end()); + } + template + iterator emplace(Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + return this->tree_.insert_multi(CommonAccess::GetSlot(node)); + } + template + iterator emplace_hint(const_iterator hint, + Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + return this->tree_.insert_hint_multi(iterator(hint), + CommonAccess::GetSlot(node)); + } + iterator insert(node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!node) return this->end(); + iterator res = + this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + CommonAccess::Destroy(&node); + return res; + } + iterator insert(const_iterator hint, + node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!node) return this->end(); + iterator res = this->tree_.insert_hint_multi( + iterator(hint), + std::move(params_type::element(CommonAccess::GetSlot(node)))); + CommonAccess::Destroy(&node); + return res; + } + + // Node extraction routines. + template + node_type extract(const key_arg &key) { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) + : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves all elements from `src` into `this`. + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) { + insert(std::move(params_type::element(src_it.slot()))); + } + src.clear(); + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } +}; + +// A base class for btree_multimap. 
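+// (For illustration through the public absl::btree_multimap wrapper: unlike
+// btree_map, duplicate keys are retained:
+//
+//   absl::btree_multimap<std::string, int> m;
+//   m.insert({"k", 1});
+//   m.insert({"k", 2});
+//   assert(m.count("k") == 2);
+// )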
+template +class btree_multimap_container : public btree_multiset_container { + using super_type = btree_multiset_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + public: + using mapped_type = typename params_type::mapped_type; + + // Inherit constructors. + using super_type::super_type; + btree_multimap_container() {} +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ diff --git a/third_party/absl/absl/container/internal/common.h b/third_party/absl/absl/container/internal/common.h new file mode 100644 index 000000000..9239bb4d0 --- /dev/null +++ b/third_party/absl/absl/container/internal/common.h @@ -0,0 +1,207 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_COMMON_H_ +#define ABSL_CONTAINER_INTERNAL_COMMON_H_ + +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/types/optional.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct IsTransparent : std::false_type {}; +template +struct IsTransparent> + : std::true_type {}; + +template +struct KeyArg { + // Transparent. Forward `K`. + template + using type = K; +}; + +template <> +struct KeyArg { + // Not transparent. Always use `key_type`. + template + using type = key_type; +}; + +// The node_handle concept from C++17. +// We specialize node_handle for sets and maps. node_handle_base holds the +// common API of both. +template +class node_handle_base { + protected: + using slot_type = typename PolicyTraits::slot_type; + + public: + using allocator_type = Alloc; + + constexpr node_handle_base() = default; + node_handle_base(node_handle_base&& other) noexcept { + *this = std::move(other); + } + ~node_handle_base() { destroy(); } + node_handle_base& operator=(node_handle_base&& other) noexcept { + destroy(); + if (!other.empty()) { + alloc_ = other.alloc_; + PolicyTraits::transfer(alloc(), slot(), other.slot()); + other.reset(); + } + return *this; + } + + bool empty() const noexcept { return !alloc_; } + explicit operator bool() const noexcept { return !empty(); } + allocator_type get_allocator() const { return *alloc_; } + + protected: + friend struct CommonAccess; + + struct transfer_tag_t {}; + node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) + : alloc_(a) { + PolicyTraits::transfer(alloc(), slot(), s); + } + + struct construct_tag_t {}; + template + node_handle_base(construct_tag_t, const allocator_type& a, Args&&... 
args) + : alloc_(a) { + PolicyTraits::construct(alloc(), slot(), std::forward(args)...); + } + + void destroy() { + if (!empty()) { + PolicyTraits::destroy(alloc(), slot()); + reset(); + } + } + + void reset() { + assert(alloc_.has_value()); + alloc_ = absl::nullopt; + } + + slot_type* slot() const { + assert(!empty()); + return reinterpret_cast(std::addressof(slot_space_)); + } + allocator_type* alloc() { return std::addressof(*alloc_); } + + private: + absl::optional alloc_ = {}; + alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {}; +}; + +// For sets. +template +class node_handle : public node_handle_base { + using Base = node_handle_base; + + public: + using value_type = typename PolicyTraits::value_type; + + constexpr node_handle() {} + + value_type& value() const { return PolicyTraits::element(this->slot()); } + + private: + friend struct CommonAccess; + + using Base::Base; +}; + +// For maps. +template +class node_handle> + : public node_handle_base { + using Base = node_handle_base; + using slot_type = typename PolicyTraits::slot_type; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + + constexpr node_handle() {} + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key. Otherwise, we provide const access. + auto key() const + -> decltype(PolicyTraits::mutable_key(std::declval())) { + return PolicyTraits::mutable_key(this->slot()); + } + + mapped_type& mapped() const { + return PolicyTraits::value(&PolicyTraits::element(this->slot())); + } + + private: + friend struct CommonAccess; + + using Base::Base; +}; + +// Provide access to non-public node-handle functions. +struct CommonAccess { + template + static auto GetSlot(const Node& node) -> decltype(node.slot()) { + return node.slot(); + } + + template + static void Destroy(Node* node) { + node->destroy(); + } + + template + static void Reset(Node* node) { + node->reset(); + } + + template + static T Transfer(Args&&... args) { + return T(typename T::transfer_tag_t{}, std::forward(args)...); + } + + template + static T Construct(Args&&... args) { + return T(typename T::construct_tag_t{}, std::forward(args)...); + } +}; + +// Implement the insert_return_type<> concept of C++17. +template +struct InsertReturnType { + Iterator position; + bool inserted; + NodeType node; +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_COMMON_H_ diff --git a/third_party/absl/absl/container/internal/common_policy_traits.h b/third_party/absl/absl/container/internal/common_policy_traits.h new file mode 100644 index 000000000..57eac678f --- /dev/null +++ b/third_party/absl/absl/container/internal/common_policy_traits.h @@ -0,0 +1,134 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
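+//
+// (A minimal sketch of the Policy concept this header consumes; `IntPolicy`
+// is hypothetical, shown only to make the construct/destroy/element
+// requirements concrete:
+//
+//   struct IntPolicy {
+//     using slot_type = int;
+//     template <class Alloc, class... Args>
+//     static void construct(Alloc*, slot_type* slot, Args&&... args) {
+//       new (slot) int(std::forward<Args>(args)...);
+//     }
+//     template <class Alloc>
+//     static void destroy(Alloc*, slot_type*) {}
+//     static int& element(slot_type* slot) { return *slot; }
+//   };
+// )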
+ +#ifndef ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template +struct common_policy_traits { + // The actual object stored in the container. + using slot_type = typename Policy::slot_type; + using reference = decltype(Policy::element(std::declval())); + using value_type = typename std::remove_reference::type; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. + // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { + transfer_impl(alloc, new_slot, old_slot, Rank0{}); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + // Note: we use remove_const_t so that the two overloads have different args + // in the case of sets with explicitly const value_types. + template + static auto element(absl::remove_const_t* slot) + -> decltype(P::element(slot)) { + return P::element(slot); + } + template + static auto element(const slot_type* slot) -> decltype(P::element(slot)) { + return P::element(slot); + } + + static constexpr bool transfer_uses_memcpy() { + return std::is_same>( + nullptr, nullptr, nullptr, Rank0{})), + std::true_type>::value; + } + + private: + // To rank the overloads below for overload resolution. Rank0 is preferred. + struct Rank2 {}; + struct Rank1 : Rank2 {}; + struct Rank0 : Rank1 {}; + + // Use auto -> decltype as an enabler. + // P::transfer returns std::true_type if transfer uses memcpy (e.g. in + // node_slot_policy). + template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, Rank0) + -> decltype(P::transfer(alloc, new_slot, old_slot)) { + return P::transfer(alloc, new_slot, old_slot); + } +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + // This overload returns true_type for the trait below. + // The conditional_t is to make the enabler type dependent. + template >::value>> + static std::true_type transfer_impl(Alloc*, slot_type* new_slot, + slot_type* old_slot, Rank1) { + // TODO(b/247130232): remove casts after fixing warnings. + // TODO(b/251814870): remove casts after fixing warnings. 
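+    // This overload participates only when value_type is trivially
+    // relocatable, so copying the raw bytes is equivalent to
+    // move-constructing into `new_slot` and then destroying `old_slot`.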
+ std::memcpy( + static_cast(std::launder( + const_cast*>(&element(new_slot)))), + static_cast(&element(old_slot)), sizeof(value_type)); + return {}; + } +#endif + + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, Rank2) { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_ diff --git a/third_party/absl/absl/container/internal/common_policy_traits_test.cc b/third_party/absl/absl/container/internal/common_policy_traits_test.cc new file mode 100644 index 000000000..faee3e7a9 --- /dev/null +++ b/third_party/absl/absl/container/internal/common_policy_traits_test.cc @@ -0,0 +1,134 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/common_policy_traits.h" + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::MockFunction; +using ::testing::AnyNumber; +using ::testing::ReturnRef; + +using Slot = int; + +struct PolicyWithoutOptionalOps { + using slot_type = Slot; + using key_type = Slot; + using init_type = Slot; + + static std::function construct; + static std::function destroy; + + static std::function element; +}; + +std::function PolicyWithoutOptionalOps::construct; +std::function PolicyWithoutOptionalOps::destroy; + +std::function PolicyWithoutOptionalOps::element; + +struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { + static std::function transfer; +}; +std::function PolicyWithOptionalOps::transfer; + +struct PolicyWithMemcpyTransfer : PolicyWithoutOptionalOps { + static std::function transfer; +}; +std::function + PolicyWithMemcpyTransfer::transfer; + +struct Test : ::testing::Test { + Test() { + PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { + construct.Call(a1, a2, std::move(a3)); + }; + PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { + destroy.Call(a1, a2); + }; + + PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { + return element.Call(a1); + }; + + PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { + return transfer.Call(a1, a2, a3); + }; + } + + std::allocator alloc; + int a = 53; + + MockFunction construct; + MockFunction destroy; + + MockFunction element; + + MockFunction transfer; +}; + +TEST_F(Test, construct) { + EXPECT_CALL(construct, Call(&alloc, &a, 53)); + common_policy_traits::construct(&alloc, &a, 53); +} + +TEST_F(Test, destroy) { + EXPECT_CALL(destroy, Call(&alloc, &a)); + common_policy_traits::destroy(&alloc, &a); +} + +TEST_F(Test, element) { + int b = 0; + EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &common_policy_traits::element(&a)); +} + +TEST_F(Test, without_transfer) { + int 
b = 42; + EXPECT_CALL(element, Call(&a)).Times(AnyNumber()).WillOnce(ReturnRef(a)); + EXPECT_CALL(element, Call(&b)).WillOnce(ReturnRef(b)); + EXPECT_CALL(construct, Call(&alloc, &a, b)).Times(AnyNumber()); + EXPECT_CALL(destroy, Call(&alloc, &b)).Times(AnyNumber()); + common_policy_traits::transfer(&alloc, &a, &b); +} + +TEST_F(Test, with_transfer) { + int b = 42; + EXPECT_CALL(transfer, Call(&alloc, &a, &b)); + common_policy_traits::transfer(&alloc, &a, &b); +} + +TEST(TransferUsesMemcpy, Basic) { + EXPECT_FALSE( + common_policy_traits::transfer_uses_memcpy()); + EXPECT_TRUE( + common_policy_traits::transfer_uses_memcpy()); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/compressed_tuple.h b/third_party/absl/absl/container/internal/compressed_tuple.h new file mode 100644 index 000000000..59e70eb21 --- /dev/null +++ b/third_party/absl/absl/container/internal/compressed_tuple.h @@ -0,0 +1,272 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Helper class to perform the Empty Base Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the optimization. If all types in Ts are empty +// classes, then CompressedTuple is itself an empty class. +// +// To access the members, use member get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// https://en.cppreference.com/w/cpp/language/ebo + +#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ +#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ + +#include +#include +#include +#include + +#include "absl/utility/utility.h" + +#if defined(_MSC_VER) && !defined(__NVCC__) +// We need to mark these classes with this declspec to ensure that +// CompressedTuple happens. +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases) +#else +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class CompressedTuple; + +namespace internal_compressed_tuple { + +template +struct Elem; +template +struct Elem, I> + : std::tuple_element> {}; +template +using ElemT = typename Elem::type; + +// We can't use EBCO on other CompressedTuples because that would mean that we +// derive from multiple Storage<> instantiations with the same I parameter, +// and potentially from multiple identical Storage<> instantiations. So anytime +// we use type inheritance rather than encapsulation, we mark +// CompressedTupleImpl, to make this easy to detect. 
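+// In practice that means a nested CompressedTuple member is always stored as
+// a member, never as a base: ShouldUseBase() below rejects any type derived
+// from uses_inheritance, even if that type is empty.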
+struct uses_inheritance {}; + +template +constexpr bool ShouldUseBase() { + return std::is_class::value && std::is_empty::value && + !std::is_final::value && + !std::is_base_of::value; +} + +// The storage class provides two specializations: +// - For empty classes, it stores T as a base class. +// - For everything else, it stores T as a member. +template ()> +struct Storage { + T value; + constexpr Storage() = default; + template + explicit constexpr Storage(absl::in_place_t, V&& v) + : value(absl::forward(v)) {} + constexpr const T& get() const& { return value; } + T& get() & { return value; } + constexpr const T&& get() const&& { return absl::move(*this).value; } + T&& get() && { return std::move(*this).value; } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { + constexpr Storage() = default; + + template + explicit constexpr Storage(absl::in_place_t, V&& v) + : T(absl::forward(v)) {} + + constexpr const T& get() const& { return *this; } + T& get() & { return *this; } + constexpr const T&& get() const&& { return absl::move(*this); } + T&& get() && { return std::move(*this); } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, absl::index_sequence, ShouldAnyUseBase> + // We use the dummy identity function through std::integral_constant to + // convince MSVC of accepting and expanding I in that context. Without it + // you would get: + // error C3548: 'I': parameter pack cannot be used in this context + : uses_inheritance, + Storage::value>... { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) + : Storage(absl::in_place, absl::forward(args))... {} + friend CompressedTuple; +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, absl::index_sequence, false> + // We use the dummy identity function as above... + : Storage::value, false>... { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) + : Storage(absl::in_place, absl::forward(args))... {} + friend CompressedTuple; +}; + +std::false_type Or(std::initializer_list); +std::true_type Or(std::initializer_list); + +// MSVC requires this to be done separately rather than within the declaration +// of CompressedTuple below. +template +constexpr bool ShouldAnyUseBase() { + return decltype( + Or({std::integral_constant()>()...})){}; +} + +template +using TupleElementMoveConstructible = + typename std::conditional::value, + std::is_convertible, + std::is_constructible>::type; + +template +struct TupleMoveConstructible : std::false_type {}; + +template +struct TupleMoveConstructible, Vs...> + : std::integral_constant< + bool, absl::conjunction< + TupleElementMoveConstructible...>::value> {}; + +template +struct compressed_tuple_size; + +template +struct compressed_tuple_size> + : public std::integral_constant {}; + +template +struct TupleItemsMoveConstructible + : std::integral_constant< + bool, TupleMoveConstructible::value == + sizeof...(Vs), + T, Vs...>::value> {}; + +} // namespace internal_compressed_tuple + +// Helper class to perform the Empty Base Class Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the CompressedTuple. 
If all types in Ts are +// empty classes, then CompressedTuple is itself an empty class. (This +// does not apply when one or more of those empty classes is itself an empty +// CompressedTuple.) +// +// To access the members, use member .get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// https://en.cppreference.com/w/cpp/language/ebo +template +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple + : private internal_compressed_tuple::CompressedTupleImpl< + CompressedTuple, absl::index_sequence_for, + internal_compressed_tuple::ShouldAnyUseBase()> { + private: + template + using ElemT = internal_compressed_tuple::ElemT; + + template + using StorageT = internal_compressed_tuple::Storage, I>; + + public: + // There seems to be a bug in MSVC dealing in which using '=default' here will + // cause the compiler to ignore the body of other constructors. The work- + // around is to explicitly implement the default constructor. +#if defined(_MSC_VER) + constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {} +#else + constexpr CompressedTuple() = default; +#endif + explicit constexpr CompressedTuple(const Ts&... base) + : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {} + + template )>>, + internal_compressed_tuple::TupleItemsMoveConstructible< + CompressedTuple, First, Vs...>>::value, + bool> = true> + explicit constexpr CompressedTuple(First&& first, Vs&&... base) + : CompressedTuple::CompressedTupleImpl(absl::in_place, + absl::forward(first), + absl::forward(base)...) {} + + template + ElemT& get() & { + return StorageT::get(); + } + + template + constexpr const ElemT& get() const& { + return StorageT::get(); + } + + template + ElemT&& get() && { + return std::move(*this).StorageT::get(); + } + + template + constexpr const ElemT&& get() const&& { + return absl::move(*this).StorageT::get(); + } +}; + +// Explicit specialization for a zero-element tuple +// (needed to avoid ambiguous overloads for the default constructor). +template <> +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + +#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ diff --git a/third_party/absl/absl/container/internal/compressed_tuple_test.cc b/third_party/absl/absl/container/internal/compressed_tuple_test.cc new file mode 100644 index 000000000..74111f975 --- /dev/null +++ b/third_party/absl/absl/container/internal/compressed_tuple_test.cc @@ -0,0 +1,419 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
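+//
+// (A quick sketch of the ref-qualified accessors these tests exercise;
+// illustrative only:
+//
+//   CompressedTuple<std::string> t(std::string("abc"));
+//   std::string& ref = t.get<0>();              // lvalue access
+//   std::string out = std::move(t).get<0>();    // rvalue access moves out
+// )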
+ +#include "absl/container/internal/compressed_tuple.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/memory/memory.h" +#include "absl/types/any.h" +#include "absl/types/optional.h" +#include "absl/utility/utility.h" + +// These are declared at global scope purely so that error messages +// are smaller and easier to understand. +enum class CallType { kConstRef, kConstMove }; + +template +struct Empty { + constexpr CallType value() const& { return CallType::kConstRef; } + constexpr CallType value() const&& { return CallType::kConstMove; } +}; + +template +struct NotEmpty { + T value; +}; + +template +struct TwoValues { + T value1; + U value2; +}; + + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using absl::test_internal::CopyableMovableInstance; +using absl::test_internal::InstanceTracker; + +TEST(CompressedTupleTest, Sizeof) { + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple, Empty<1>>)); + EXPECT_EQ(sizeof(int), + sizeof(CompressedTuple, Empty<1>, Empty<2>>)); + + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty, Empty<1>>)); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) { + InstanceTracker tracker; + CompressedTuple x1(CopyableMovableInstance(1)); + EXPECT_EQ(tracker.instances(), 1); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_LE(tracker.moves(), 1); + EXPECT_EQ(x1.get<0>().value(), 1); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) { + InstanceTracker tracker; + + CopyableMovableInstance i1(1); + CompressedTuple x1(std::move(i1)); + EXPECT_EQ(tracker.instances(), 2); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_LE(tracker.moves(), 1); + EXPECT_EQ(x1.get<0>().value(), 1); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CopyableMovableInstance i2(2); + Empty<0> empty; + CompressedTuple> + x1(std::move(i1), i2, empty); + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(x1.get<1>().value(), 2); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); +} + +struct IncompleteType; +CompressedTuple> +MakeWithIncomplete(CopyableMovableInstance i1, + IncompleteType& t, // NOLINT + Empty<0> empty) { + return CompressedTuple>{ + std::move(i1), t, empty}; +} + +struct IncompleteType {}; +TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + Empty<0> empty; + struct DerivedType : IncompleteType {int value = 0;}; + DerivedType fd; + fd.value = 7; + + CompressedTuple> x1 = + MakeWithIncomplete(std::move(i1), fd, empty); + + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(static_cast(x1.get<1>()).value, 7); + + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 2); +} + +TEST(CompressedTupleTest, + OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CopyableMovableInstance i2(2); + CompressedTuple> + x1(std::move(i1), i2, {}); // NOLINT + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(x1.get<1>().value(), 2); + EXPECT_EQ(tracker.instances(), 3); + // We are forced into the `const Ts&...` constructor (invoking copies) + 
// because we need it to deduce the type of `{}`. + // std::tuple also has this behavior. + // Note, this test is proof that this is expected behavior, but it is not + // _desired_ behavior. + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); +} + +TEST(CompressedTupleTest, OneCopyOnLValueConstruction) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + + CompressedTuple x1(i1); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); + + tracker.ResetCopiesMovesSwaps(); + + CopyableMovableInstance i2(2); + const CopyableMovableInstance& i2_ref = i2; + CompressedTuple x2(i2_ref); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); +} + +TEST(CompressedTupleTest, OneMoveOnRValueAccess) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CompressedTuple x(std::move(i1)); + tracker.ResetCopiesMovesSwaps(); + + CopyableMovableInstance i2 = std::move(x).get<0>(); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); +} + +TEST(CompressedTupleTest, OneCopyOnLValueAccess) { + InstanceTracker tracker; + + CompressedTuple x(CopyableMovableInstance(0)); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + + CopyableMovableInstance t = x.get<0>(); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 1); +} + +TEST(CompressedTupleTest, ZeroCopyOnRefAccess) { + InstanceTracker tracker; + + CompressedTuple x(CopyableMovableInstance(0)); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + + CopyableMovableInstance& t1 = x.get<0>(); + const CopyableMovableInstance& t2 = x.get<0>(); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(t1.value(), 0); + EXPECT_EQ(t2.value(), 0); +} + +TEST(CompressedTupleTest, Access) { + struct S { + std::string x; + }; + CompressedTuple, S> x(7, {}, S{"ABC"}); + EXPECT_EQ(sizeof(x), sizeof(TwoValues)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_EQ("ABC", x.get<2>().x); +} + +TEST(CompressedTupleTest, NonClasses) { + CompressedTuple x(7, "ABC"); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); +} + +TEST(CompressedTupleTest, MixClassAndNonClass) { + CompressedTuple, NotEmpty> x(7, "ABC", {}, + {1.25}); + struct Mock { + int v; + const char* p; + double d; + }; + EXPECT_EQ(sizeof(x), sizeof(Mock)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); + EXPECT_EQ(1.25, x.get<3>().value); +} + +TEST(CompressedTupleTest, Nested) { + CompressedTuple, + CompressedTuple>> + x(1, CompressedTuple(2), + CompressedTuple>(3, CompressedTuple(4))); + EXPECT_EQ(1, x.get<0>()); + EXPECT_EQ(2, x.get<1>().get<0>()); + EXPECT_EQ(3, x.get<2>().get<0>()); + EXPECT_EQ(4, x.get<2>().get<1>().get<0>()); + + CompressedTuple, Empty<0>, + CompressedTuple, CompressedTuple>>> + y; + std::set*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(), + &y.get<2>().get<1>().get<0>()}; +#ifdef _MSC_VER + // MSVC has a bug where many instances of the same base class are layed out in + // the same address when using __declspec(empty_bases). + // This will be fixed in a future version of MSVC. + int expected = 1; +#else + int expected = 4; +#endif + EXPECT_EQ(expected, sizeof(y)); + EXPECT_EQ(expected, empties.size()); + EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size()); + + EXPECT_EQ(4 * sizeof(char), + sizeof(CompressedTuple, + CompressedTuple>)); + EXPECT_TRUE((std::is_empty, Empty<1>>>::value)); + + // Make sure everything still works when things are nested. 
+ struct CT_Empty : CompressedTuple> {}; + CompressedTuple, CT_Empty> nested_empty; + auto contained = nested_empty.get<0>(); + auto nested = nested_empty.get<1>().get<0>(); + EXPECT_TRUE((std::is_same::value)); +} + +TEST(CompressedTupleTest, Reference) { + int i = 7; + std::string s = "Very long string that goes in the heap"; + CompressedTuple x(i, i, s, s); + + // Sanity check. We should have not moved from `s` + EXPECT_EQ(s, "Very long string that goes in the heap"); + + EXPECT_EQ(x.get<0>(), x.get<1>()); + EXPECT_NE(&x.get<0>(), &x.get<1>()); + EXPECT_EQ(&x.get<1>(), &i); + + EXPECT_EQ(x.get<2>(), x.get<3>()); + EXPECT_NE(&x.get<2>(), &x.get<3>()); + EXPECT_EQ(&x.get<3>(), &s); +} + +TEST(CompressedTupleTest, NoElements) { + CompressedTuple<> x; + static_cast(x); // Silence -Wunused-variable. + EXPECT_TRUE(std::is_empty>::value); +} + +TEST(CompressedTupleTest, MoveOnlyElements) { + CompressedTuple> str_tup( + absl::make_unique("str")); + + CompressedTuple>, + std::unique_ptr> + x(std::move(str_tup), absl::make_unique(5)); + + EXPECT_EQ(*x.get<0>().get<0>(), "str"); + EXPECT_EQ(*x.get<1>(), 5); + + std::unique_ptr x0 = std::move(x.get<0>()).get<0>(); + std::unique_ptr x1 = std::move(x).get<1>(); + + EXPECT_EQ(*x0, "str"); + EXPECT_EQ(*x1, 5); +} + +TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) { + CompressedTuple> base( + absl::make_unique("str")); + EXPECT_EQ(*base.get<0>(), "str"); + + CompressedTuple> copy(std::move(base)); + EXPECT_EQ(*copy.get<0>(), "str"); +} + +TEST(CompressedTupleTest, AnyElements) { + any a(std::string("str")); + CompressedTuple x(any(5), a); + EXPECT_EQ(absl::any_cast(x.get<0>()), 5); + EXPECT_EQ(absl::any_cast(x.get<1>()), "str"); + + a = 0.5f; + EXPECT_EQ(absl::any_cast(x.get<1>()), 0.5); +} + +TEST(CompressedTupleTest, Constexpr) { + struct NonTrivialStruct { + constexpr NonTrivialStruct() = default; + constexpr int value() const { return v; } + int v = 5; + }; + struct TrivialStruct { + TrivialStruct() = default; + constexpr int value() const { return v; } + int v; + }; + constexpr CompressedTuple, Empty<0>> x( + 7, 1.25, CompressedTuple(5), {}); + constexpr int x0 = x.get<0>(); + constexpr double x1 = x.get<1>(); + constexpr int x2 = x.get<2>().get<0>(); + constexpr CallType x3 = x.get<3>().value(); + + EXPECT_EQ(x0, 7); + EXPECT_EQ(x1, 1.25); + EXPECT_EQ(x2, 5); + EXPECT_EQ(x3, CallType::kConstRef); + +#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4 + constexpr CompressedTuple, TrivialStruct, int> trivial = {}; + constexpr CallType trivial0 = trivial.get<0>().value(); + constexpr int trivial1 = trivial.get<1>().value(); + constexpr int trivial2 = trivial.get<2>(); + + EXPECT_EQ(trivial0, CallType::kConstRef); + EXPECT_EQ(trivial1, 0); + EXPECT_EQ(trivial2, 0); +#endif + + constexpr CompressedTuple, NonTrivialStruct, absl::optional> + non_trivial = {}; + constexpr CallType non_trivial0 = non_trivial.get<0>().value(); + constexpr int non_trivial1 = non_trivial.get<1>().value(); + constexpr absl::optional non_trivial2 = non_trivial.get<2>(); + + EXPECT_EQ(non_trivial0, CallType::kConstRef); + EXPECT_EQ(non_trivial1, 5); + EXPECT_EQ(non_trivial2, absl::nullopt); + + static constexpr char data[] = "DEF"; + constexpr CompressedTuple z(data); + constexpr const char* z1 = z.get<0>(); + EXPECT_EQ(std::string(z1), std::string(data)); + +#if defined(__clang__) + // An apparent bug in earlier versions of gcc claims these are ambiguous. 
+ constexpr int x2m = absl::move(x.get<2>()).get<0>(); + constexpr CallType x3m = absl::move(x).get<3>().value(); + EXPECT_EQ(x2m, 5); + EXPECT_EQ(x3m, CallType::kConstMove); +#endif +} + +#if defined(__clang__) || defined(__GNUC__) +TEST(CompressedTupleTest, EmptyFinalClass) { + struct S final { + int f() const { return 5; } + }; + CompressedTuple x; + EXPECT_EQ(x.get<0>().f(), 5); +} +#endif + +// TODO(b/214288561): enable this test. +TEST(CompressedTupleTest, DISABLED_NestedEbo) { + struct Empty1 {}; + struct Empty2 {}; + CompressedTuple, int> x; + CompressedTuple y; + // Currently fails with sizeof(x) == 8, sizeof(y) == 4. + EXPECT_EQ(sizeof(x), sizeof(y)); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/container_memory.h b/third_party/absl/absl/container/internal/container_memory.h new file mode 100644 index 000000000..3262d4eb8 --- /dev/null +++ b/third_party/absl/absl/container/internal/container_memory.h @@ -0,0 +1,458 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ +#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#include +#endif + +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct alignas(Alignment) AlignedType {}; + +// Allocates at least n bytes aligned to the specified alignment. +// Alignment must be a power of 2. It must be positive. +// +// Note that many allocators don't honor alignment requirements above certain +// threshold (usually either alignof(std::max_align_t) or alignof(void*)). +// Allocate() doesn't apply alignment corrections. If the underlying allocator +// returns insufficiently alignment pointer, that's what you are going to get. +template +void* Allocate(Alloc* alloc, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); + assert(reinterpret_cast(p) % Alignment == 0 && + "allocator does not respect alignment"); + return p; +} + +// The pointer must have been previously obtained by calling +// Allocate(alloc, n). 
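+// For example (mirroring the usage in container_memory_test.cc):
+//
+//   std::allocator<int> alloc;
+//   void* p = Allocate<8>(&alloc, 16);  // at least 16 bytes, 8-byte aligned
+//   Deallocate<8>(&alloc, p, 16);       // same alignment and byte count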
+template +void Deallocate(Alloc* alloc, void* p, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + AT::deallocate(my_mem_alloc, static_cast(p), + (n + sizeof(M) - 1) / sizeof(M)); +} + +namespace memory_internal { + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, + absl::index_sequence) { + absl::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))...); +} + +template +struct WithConstructedImplF { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args) const { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; +}; + +template +decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, absl::index_sequence, F&& f) { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))...); +} + +template +auto TupleRefImpl(T&& t, absl::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { + return std::forward_as_tuple(std::get(std::forward(t))...); +} + +// Returns a tuple of references to the elements of the input tuple. T must be a +// tuple. +template +auto TupleRef(T&& t) -> decltype(TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>())) { + return TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +template +decltype(std::declval()(std::declval(), std::piecewise_construct, + std::declval>(), std::declval())) +DecomposePairImpl(F&& f, std::pair, V> p) { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); +} + +} // namespace memory_internal + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +// Constructs T using the args specified in the tuple and calls F with the +// constructed value. +template +decltype(std::declval()(std::declval())) WithConstructed(Tuple&& t, + F&& f) { + return memory_internal::WithConstructedImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f)); +} + +// Given arguments of an std::pair's constructor, PairArgs() returns a pair of +// tuples with references to the passed arguments. The tuples contain +// constructor arguments for the first and the second elements of the pair. +// +// The following two snippets are equivalent. +// +// 1. std::pair p(args...); +// +// 2. 
auto a = PairArgs(args...); +// std::pair p(std::piecewise_construct, +// std::move(a.first), std::move(a.second)); +inline std::pair, std::tuple<>> PairArgs() { return {}; } +template +std::pair, std::tuple> PairArgs(F&& f, S&& s) { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), + std::forward_as_tuple(std::forward(s))}; +} +template +std::pair, std::tuple> PairArgs( + const std::pair& p) { + return PairArgs(p.first, p.second); +} +template +std::pair, std::tuple> PairArgs(std::pair&& p) { + return PairArgs(std::forward(p.first), std::forward(p.second)); +} +template +auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s)))) { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s))); +} + +// A helper function for implementing apply() in map policies. +template +auto DecomposePair(F&& f, Args&&... args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...))) { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...)); +} + +// A helper function for implementing apply() in set policies. +template +decltype(std::declval()(std::declval(), std::declval())) +DecomposeValue(F&& f, Arg&& arg) { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); +} + +// Helper functions for asan and msan. +inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(m, s); +#endif +#ifdef ABSL_HAVE_MEMORY_SANITIZER + __msan_poison(m, s); +#endif + (void)m; + (void)s; +} + +inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(m, s); +#endif +#ifdef ABSL_HAVE_MEMORY_SANITIZER + __msan_unpoison(m, s); +#endif + (void)m; + (void)s; +} + +template +inline void SanitizerPoisonObject(const T* object) { + SanitizerPoisonMemoryRegion(object, sizeof(T)); +} + +template +inline void SanitizerUnpoisonObject(const T* object) { + SanitizerUnpoisonMemoryRegion(object, sizeof(T)); +} + +namespace memory_internal { + +// If Pair is a standard-layout type, OffsetOf::kFirst and +// OffsetOf::kSecond are equivalent to offsetof(Pair, first) and +// offsetof(Pair, second) respectively. Otherwise they are -1. +// +// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout +// type, which is non-portable. +template +struct OffsetOf { + static constexpr size_t kFirst = static_cast(-1); + static constexpr size_t kSecond = static_cast(-1); +}; + +template +struct OffsetOf::type> { + static constexpr size_t kFirst = offsetof(Pair, first); + static constexpr size_t kSecond = offsetof(Pair, second); +}; + +template +struct IsLayoutCompatible { + private: + struct Pair { + K first; + V second; + }; + + // Is P layout-compatible with Pair? + template + static constexpr bool LayoutCompatible() { + return std::is_standard_layout
<P>() && sizeof(P) == sizeof(Pair) &&
+           alignof(P) == alignof(Pair) &&
+           memory_internal::OffsetOf<P>::kFirst ==
+               memory_internal::OffsetOf<Pair>::kFirst &&
+           memory_internal::OffsetOf<P>
::kSecond == + memory_internal::OffsetOf::kSecond; + } + + public: + // Whether pair and pair are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout() && + std::is_standard_layout() && + memory_internal::OffsetOf::kFirst == 0 && + LayoutCompatible>() && + LayoutCompatible>(); +}; + +} // namespace memory_internal + +// The internal storage type for key-value containers like flat_hash_map. +// +// It is convenient for the value_type of a flat_hash_map to be +// pair; the "const K" prevents accidental modification of the key +// when dealing with the reference returned from find() and similar methods. +// However, this creates other problems; we want to be able to emplace(K, V) +// efficiently with move operations, and similarly be able to move a +// pair in insert(). +// +// The solution is this union, which aliases the const and non-const versions +// of the pair. This also allows flat_hash_map to work, even though +// that has the same efficiency issues with move in emplace() and insert() - +// but people do it anyway. +// +// If kMutableKeys is false, only the value member can be accessed. +// +// If kMutableKeys is true, key can be accessed through all slots while value +// and mutable_value must be accessed only via INITIALIZED slots. Slots are +// created and destroyed via mutable_value so that the key can be moved later. +// +// Accessing one of the union fields while the other is active is safe as +// long as they are layout-compatible, which is guaranteed by the definition of +// kMutableKeys. For C++11, the relevant section of the standard is +// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19) +template +union map_slot_type { + map_slot_type() {} + ~map_slot_type() = delete; + using value_type = std::pair; + using mutable_value_type = + std::pair, absl::remove_const_t>; + + value_type value; + mutable_value_type mutable_value; + absl::remove_const_t key; +}; + +template +struct map_slot_policy { + using slot_type = map_slot_type; + using value_type = std::pair; + using mutable_value_type = + std::pair, absl::remove_const_t>; + + private: + static void emplace(slot_type* slot) { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair and pair are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = memory_internal::IsLayoutCompatible; + + public: + static value_type& element(slot_type* slot) { return slot->value; } + static const value_type& element(const slot_type* slot) { + return slot->value; + } + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + static K& mutable_key(slot_type* slot) { + // Still check for kMutableKeys so that we can avoid calling std::launder + // unless necessary because it can interfere with optimizations. + return kMutableKeys::value ? slot->key + : *std::launder(const_cast( + std::addressof(slot->value.first))); + } +#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606) + static const K& mutable_key(slot_type* slot) { return key(slot); } +#endif + + static const K& key(const slot_type* slot) { + return kMutableKeys::value ? 
slot->key : slot->value.first; + } + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct(*alloc, &slot->mutable_value, + std::forward(args)...); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::forward(args)...); + } + } + + // Construct this slot by moving from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::move(other->value)); + } + } + + // Construct this slot by copying from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, + const slot_type* other) { + emplace(slot); + absl::allocator_traits::construct(*alloc, &slot->value, + other->value); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + if (kMutableKeys::value) { + absl::allocator_traits::destroy(*alloc, &slot->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &slot->value); + } + } + + template + static auto transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + auto is_relocatable = + typename absl::is_trivially_relocatable::type(); + + emplace(new_slot); +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + if (is_relocatable) { + // TODO(b/247130232,b/251814870): remove casts after fixing warnings. + std::memcpy(static_cast(std::launder(&new_slot->value)), + static_cast(&old_slot->value), + sizeof(value_type)); + return is_relocatable; + } +#endif + + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &new_slot->value, + std::move(old_slot->value)); + } + destroy(alloc, old_slot); + return is_relocatable; + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/third_party/absl/absl/container/internal/container_memory_test.cc b/third_party/absl/absl/container/internal/container_memory_test.cc new file mode 100644 index 000000000..90d64bf55 --- /dev/null +++ b/third_party/absl/absl/container/internal/container_memory_test.cc @@ -0,0 +1,286 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
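+//
+// (Recap of the PairArgs() contract exercised below: the following two ways
+// of building a pair are intended to be equivalent, assuming some
+// std::pair<F, S>:
+//
+//   std::pair<F, S> p(args...);
+//
+//   auto a = PairArgs(args...);
+//   std::pair<F, S> p(std::piecewise_construct,
+//                     std::move(a.first), std::move(a.second));
+// )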
+ +#include "absl/container/internal/container_memory.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/no_destructor.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::test_internal::CopyableMovableInstance; +using ::absl::test_internal::InstanceTracker; +using ::testing::_; +using ::testing::ElementsAre; +using ::testing::Gt; +using ::testing::Pair; + +TEST(Memory, AlignmentLargerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +TEST(Memory, AlignmentSmallerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +std::map& AllocationMap() { + static absl::NoDestructor> map; + return *map; +} + +template +struct TypeCountingAllocator { + TypeCountingAllocator() = default; + template + TypeCountingAllocator(const TypeCountingAllocator&) {} // NOLINT + + using value_type = T; + + T* allocate(size_t n, const void* = nullptr) { + AllocationMap()[typeid(T)] += n; + return std::allocator().allocate(n); + } + void deallocate(T* p, std::size_t n) { + AllocationMap()[typeid(T)] -= n; + return std::allocator().deallocate(p, n); + } +}; + +TEST(Memory, AllocateDeallocateMatchType) { + TypeCountingAllocator alloc; + void* mem = Allocate<1>(&alloc, 1); + // Verify that it was allocated + EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, Gt(0)))); + Deallocate<1>(&alloc, mem, 1); + // Verify that the deallocation matched. 
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0))); +} + +class Fixture : public ::testing::Test { + using Alloc = std::allocator; + + public: + Fixture() { ptr_ = std::allocator_traits::allocate(*alloc(), 1); } + ~Fixture() override { + std::allocator_traits::destroy(*alloc(), ptr_); + std::allocator_traits::deallocate(*alloc(), ptr_, 1); + } + std::string* ptr() { return ptr_; } + Alloc* alloc() { return &alloc_; } + + private: + Alloc alloc_; + std::string* ptr_; +}; + +TEST_F(Fixture, ConstructNoArgs) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple()); + EXPECT_EQ(*ptr(), ""); +} + +TEST_F(Fixture, ConstructOneArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde")); + EXPECT_EQ(*ptr(), "abcde"); +} + +TEST_F(Fixture, ConstructTwoArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a')); + EXPECT_EQ(*ptr(), "aaaaa"); +} + +TEST(PairArgs, NoArgs) { + EXPECT_THAT(PairArgs(), + Pair(std::forward_as_tuple(), std::forward_as_tuple())); +} + +TEST(PairArgs, TwoArgs) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(1, 'A')); +} + +TEST(PairArgs, Pair) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::make_pair(1, 'A'))); +} + +TEST(PairArgs, Piecewise) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::piecewise_construct, std::forward_as_tuple(1), + std::forward_as_tuple('A'))); +} + +TEST(WithConstructed, Simple) { + EXPECT_EQ(1, WithConstructed( + std::make_tuple(std::string("a")), + [](absl::string_view str) { return str.size(); })); +} + +template +decltype(DecomposeValue(std::declval(), std::declval())) +DecomposeValueImpl(int, F&& f, Arg&& arg) { + return DecomposeValue(std::forward(f), std::forward(arg)); +} + +template +const char* DecomposeValueImpl(char, F&& f, Arg&& arg) { + return "not decomposable"; +} + +template +decltype(DecomposeValueImpl(0, std::declval(), std::declval())) +TryDecomposeValue(F&& f, Arg&& arg) { + return DecomposeValueImpl(0, std::forward(f), std::forward(arg)); +} + +TEST(DecomposeValue, Decomposable) { + auto f = [](const int& x, int&& y) { // NOLINT + EXPECT_EQ(&x, &y); + EXPECT_EQ(42, x); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposeValue(f, 42)); +} + +TEST(DecomposeValue, NotDecomposable) { + auto f = [](void*) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42)); +} + +template +decltype(DecomposePair(std::declval(), std::declval()...)) +DecomposePairImpl(int, F&& f, Args&&... args) { + return DecomposePair(std::forward(f), std::forward(args)...); +} + +template +const char* DecomposePairImpl(char, F&& f, Args&&... args) { + return "not decomposable"; +} + +template +decltype(DecomposePairImpl(0, std::declval(), std::declval()...)) +TryDecomposePair(F&& f, Args&&... args) { + return DecomposePairImpl(0, std::forward(f), std::forward(args)...); +} + +TEST(DecomposePair, Decomposable) { + auto f = [](const int& x, // NOLINT + std::piecewise_construct_t, std::tuple k, + std::tuple&& v) { + EXPECT_EQ(&x, &std::get<0>(k)); + EXPECT_EQ(42, x); + EXPECT_EQ(0.5, std::get<0>(v)); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5)); + EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5))); + EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct, + std::make_tuple(42), std::make_tuple(0.5))); +} + +TEST(DecomposePair, NotDecomposable) { + auto f = [](...) 
{ + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", TryDecomposePair(f)); + EXPECT_STREQ("not decomposable", + TryDecomposePair(f, std::piecewise_construct, std::make_tuple(), + std::make_tuple(0.5))); +} + +TEST(MapSlotPolicy, ConstKeyAndValue) { + using slot_policy = map_slot_policy; + using slot_type = typename slot_policy::slot_type; + + union Slots { + Slots() {} + ~Slots() {} + slot_type slots[100]; + } slots; + + std::allocator< + std::pair> + alloc; + InstanceTracker tracker; + slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1), + CopyableMovableInstance(1)); + for (int i = 0; i < 99; ++i) { + slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]); + } + slot_policy::destroy(&alloc, &slots.slots[99]); + + EXPECT_EQ(tracker.copies(), 0); +} + +TEST(MapSlotPolicy, TransferReturnsTrue) { + { + using slot_policy = map_slot_policy; + EXPECT_TRUE( + (std::is_same>( + nullptr, nullptr, nullptr)), + std::true_type>::value)); + } + { + struct NonRelocatable { + NonRelocatable() = default; + NonRelocatable(NonRelocatable&&) {} + NonRelocatable& operator=(NonRelocatable&&) { return *this; } + void* self = nullptr; + }; + + EXPECT_FALSE(absl::is_trivially_relocatable::value); + using slot_policy = map_slot_policy; + EXPECT_TRUE( + (std::is_same>( + nullptr, nullptr, nullptr)), + std::false_type>::value)); + } +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/hash_function_defaults.h b/third_party/absl/absl/container/internal/hash_function_defaults.h new file mode 100644 index 000000000..a3613b4bc --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_function_defaults.h @@ -0,0 +1,209 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Define the default Hash and Eq functions for SwissTable containers. +// +// std::hash and std::equal_to are not appropriate hash and equal +// functions for SwissTable containers. There are two reasons for this. +// +// SwissTable containers are power of 2 sized containers: +// +// This means they use the lower bits of the hash value to find the slot for +// each entry. The typical hash function for integral types is the identity. +// This is a very weak hash function for SwissTable and any power of 2 sized +// hashtable implementation which will lead to excessive collisions. For +// SwissTable we use murmur3 style mixing to reduce collisions to a minimum. +// +// SwissTable containers support heterogeneous lookup: +// +// In order to make heterogeneous lookup work, hash and equal functions must be +// polymorphic. At the same time they have to satisfy the same requirements the +// C++ standard imposes on hash functions and equality operators. 
That is:
+//
+// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
+// hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
+//
+// For SwissTable containers this requirement is relaxed to allow a and b of
+// any and possibly different types. Note that, like the standard, the hash and
+// equal functions are still bound to T. This is important because some type U
+// can be hashed by/tested for equality differently depending on T. A notable
+// example is `const char*`. `const char*` is treated as a C-style string when
+// the hash function is `hash<std::string>` but as a pointer when the hash
+// function is `hash<const char*>`.
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <cstddef>
+#include <functional>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
+
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// The hash of an object of type T is computed by using absl::Hash<T>.
+template <class T, class E = void>
+struct HashEq {
+  using Hash = absl::Hash<T>;
+  using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+  using is_transparent = void;
+
+  size_t operator()(absl::string_view v) const {
+    return absl::Hash<absl::string_view>{}(v);
+  }
+  size_t operator()(const absl::Cord& v) const {
+    return absl::Hash<absl::Cord>{}(v);
+  }
+};
+
+struct StringEq {
+  using is_transparent = void;
+  bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+    return lhs == rhs;
+  }
+  bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+    return lhs == rhs;
+  }
+  bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+    return lhs == rhs;
+  }
+  bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+    return lhs == rhs;
+  }
+};
+
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+  using Hash = StringHash;
+  using Eq = StringEq;
+};
+
+template <>
+struct HashEq<std::string> : StringHashEq {};
+template <>
+struct HashEq<absl::string_view> : StringHashEq {};
+template <>
+struct HashEq<absl::Cord> : StringHashEq {};
+
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+
+template <typename TChar>
+struct BasicStringHash {
+  using is_transparent = void;
+
+  size_t operator()(std::basic_string_view<TChar> v) const {
+    return absl::Hash<std::basic_string_view<TChar>>{}(v);
+  }
+};
+
+template <typename TChar>
+struct BasicStringEq {
+  using is_transparent = void;
+  bool operator()(std::basic_string_view<TChar> lhs,
+                  std::basic_string_view<TChar> rhs) const {
+    return lhs == rhs;
+  }
+};
+
+// Supports heterogeneous lookup for w/u16/u32 string + string_view + char*.
+template <typename TChar>
+struct BasicStringHashEq {
+  using Hash = BasicStringHash<TChar>;
+  using Eq = BasicStringEq<TChar>;
+};
+
+template <>
+struct HashEq<std::wstring> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::wstring_view> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::u16string> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u16string_view> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u32string> : BasicStringHashEq<char32_t> {};
+template <>
+struct HashEq<std::u32string_view> : BasicStringHashEq<char32_t> {};
+
+#endif  // ABSL_HAVE_STD_STRING_VIEW
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+  struct Hash {
+    using is_transparent = void;
+    template <class U>
+    size_t operator()(const U& ptr) const {
+      return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+    }
+  };
+  struct Eq {
+    using is_transparent = void;
+    template <class A, class B>
+    bool operator()(const A& a, const B& b) const {
+      return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+    }
+  };
+
+ private:
+  static const T* ToPtr(const T* ptr) { return ptr; }
+  template <class U, class D>
+  static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+    return ptr.get();
+  }
+  template <class U>
+  static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+    return ptr.get();
+  }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
+
+// This header's visibility is restricted. If you need to access the default
+// hasher, please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename absl::flat_hash_map<K, V, Hash>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted. If you need to access the default
+// key equal, please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash, Eq>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ 
diff --git a/third_party/absl/absl/container/internal/hash_function_defaults_test.cc b/third_party/absl/absl/container/internal/hash_function_defaults_test.cc new file mode 100644 index 000000000..c31af3be9 --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_function_defaults_test.cc @@ -0,0 +1,549 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
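An illustrative aside rather than part of the patch: a usage-level sketch of what these transparent defaults buy users, assuming only the public absl::flat_hash_map API (which wires hash_default_hash and hash_default_eq in as its defaults).

#include <string>

#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"

int main() {
  absl::flat_hash_map<std::string, int> m;
  m["key"] = 1;
  // Heterogeneous lookup: hash_default_hash<std::string> and
  // hash_default_eq<std::string> are transparent (StringHashEq), so find()
  // accepts a string_view without materializing a temporary std::string.
  absl::string_view needle = "key";
  return m.find(needle) != m.end() ? 0 : 1;
}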
+ +#include "absl/container/internal/hash_function_defaults.h" + +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/random/random.h" +#include "absl/strings/cord.h" +#include "absl/strings/cord_test_helpers.h" +#include "absl/strings/string_view.h" + +#ifdef ABSL_HAVE_STD_STRING_VIEW +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::Types; + +TEST(Eq, Int32) { + hash_default_eq eq; + EXPECT_TRUE(eq(1, 1u)); + EXPECT_TRUE(eq(1, char{1})); + EXPECT_TRUE(eq(1, true)); + EXPECT_TRUE(eq(1, double{1.1})); + EXPECT_FALSE(eq(1, char{2})); + EXPECT_FALSE(eq(1, 2u)); + EXPECT_FALSE(eq(1, false)); + EXPECT_FALSE(eq(1, 2.)); +} + +TEST(Hash, Int32) { + hash_default_hash hash; + auto h = hash(1); + EXPECT_EQ(h, hash(1u)); + EXPECT_EQ(h, hash(char{1})); + EXPECT_EQ(h, hash(true)); + EXPECT_EQ(h, hash(double{1.1})); + EXPECT_NE(h, hash(2u)); + EXPECT_NE(h, hash(char{2})); + EXPECT_NE(h, hash(false)); + EXPECT_NE(h, hash(2.)); +} + +enum class MyEnum { A, B, C, D }; + +TEST(Eq, Enum) { + hash_default_eq eq; + EXPECT_TRUE(eq(MyEnum::A, MyEnum::A)); + EXPECT_FALSE(eq(MyEnum::A, MyEnum::B)); +} + +TEST(Hash, Enum) { + hash_default_hash hash; + + for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) { + auto h = hash(e); + EXPECT_EQ(h, hash_default_hash{}(static_cast(e))); + EXPECT_NE(h, hash(MyEnum::D)); + } +} + +using StringTypes = ::testing::Types; + +template +struct EqString : ::testing::Test { + hash_default_eq key_eq; +}; + +TYPED_TEST_SUITE(EqString, StringTypes); + +template +struct HashString : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_SUITE(HashString, StringTypes); + +TYPED_TEST(EqString, Works) { + auto eq = this->key_eq; + EXPECT_TRUE(eq("a", "a")); + EXPECT_TRUE(eq("a", absl::string_view("a"))); + EXPECT_TRUE(eq("a", std::string("a"))); + EXPECT_FALSE(eq("a", "b")); + EXPECT_FALSE(eq("a", absl::string_view("b"))); + EXPECT_FALSE(eq("a", std::string("b"))); +} + +TYPED_TEST(HashString, Works) { + auto hash = this->hasher; + auto h = hash("a"); + EXPECT_EQ(h, hash(absl::string_view("a"))); + EXPECT_EQ(h, hash(std::string("a"))); + EXPECT_NE(h, hash(absl::string_view("b"))); + EXPECT_NE(h, hash(std::string("b"))); +} + +TEST(BasicStringViewTest, WStringEqWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_eq eq; + EXPECT_TRUE(eq(L"a", L"a")); + EXPECT_TRUE(eq(L"a", std::wstring_view(L"a"))); + EXPECT_TRUE(eq(L"a", std::wstring(L"a"))); + EXPECT_FALSE(eq(L"a", L"b")); + EXPECT_FALSE(eq(L"a", std::wstring_view(L"b"))); + EXPECT_FALSE(eq(L"a", std::wstring(L"b"))); +#endif +} + +TEST(BasicStringViewTest, WStringViewEqWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_eq eq; + EXPECT_TRUE(eq(L"a", L"a")); + EXPECT_TRUE(eq(L"a", std::wstring_view(L"a"))); + EXPECT_TRUE(eq(L"a", std::wstring(L"a"))); + EXPECT_FALSE(eq(L"a", L"b")); + EXPECT_FALSE(eq(L"a", std::wstring_view(L"b"))); + EXPECT_FALSE(eq(L"a", std::wstring(L"b"))); +#endif +} + +TEST(BasicStringViewTest, U16StringEqWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_eq eq; + EXPECT_TRUE(eq(u"a", u"a")); + EXPECT_TRUE(eq(u"a", std::u16string_view(u"a"))); + EXPECT_TRUE(eq(u"a", std::u16string(u"a"))); + EXPECT_FALSE(eq(u"a", u"b")); + EXPECT_FALSE(eq(u"a", std::u16string_view(u"b"))); + EXPECT_FALSE(eq(u"a", std::u16string(u"b"))); +#endif +} + +TEST(BasicStringViewTest, U16StringViewEqWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + 
GTEST_SKIP(); +#else + hash_default_eq eq; + EXPECT_TRUE(eq(u"a", u"a")); + EXPECT_TRUE(eq(u"a", std::u16string_view(u"a"))); + EXPECT_TRUE(eq(u"a", std::u16string(u"a"))); + EXPECT_FALSE(eq(u"a", u"b")); + EXPECT_FALSE(eq(u"a", std::u16string_view(u"b"))); + EXPECT_FALSE(eq(u"a", std::u16string(u"b"))); +#endif +} + +TEST(BasicStringViewTest, U32StringEqWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_eq eq; + EXPECT_TRUE(eq(U"a", U"a")); + EXPECT_TRUE(eq(U"a", std::u32string_view(U"a"))); + EXPECT_TRUE(eq(U"a", std::u32string(U"a"))); + EXPECT_FALSE(eq(U"a", U"b")); + EXPECT_FALSE(eq(U"a", std::u32string_view(U"b"))); + EXPECT_FALSE(eq(U"a", std::u32string(U"b"))); +#endif +} + +TEST(BasicStringViewTest, U32StringViewEqWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_eq eq; + EXPECT_TRUE(eq(U"a", U"a")); + EXPECT_TRUE(eq(U"a", std::u32string_view(U"a"))); + EXPECT_TRUE(eq(U"a", std::u32string(U"a"))); + EXPECT_FALSE(eq(U"a", U"b")); + EXPECT_FALSE(eq(U"a", std::u32string_view(U"b"))); + EXPECT_FALSE(eq(U"a", std::u32string(U"b"))); +#endif +} + +TEST(BasicStringViewTest, WStringHashWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_hash hash; + auto h = hash(L"a"); + EXPECT_EQ(h, hash(std::wstring_view(L"a"))); + EXPECT_EQ(h, hash(std::wstring(L"a"))); + EXPECT_NE(h, hash(std::wstring_view(L"b"))); + EXPECT_NE(h, hash(std::wstring(L"b"))); +#endif +} + +TEST(BasicStringViewTest, WStringViewHashWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_hash hash; + auto h = hash(L"a"); + EXPECT_EQ(h, hash(std::wstring_view(L"a"))); + EXPECT_EQ(h, hash(std::wstring(L"a"))); + EXPECT_NE(h, hash(std::wstring_view(L"b"))); + EXPECT_NE(h, hash(std::wstring(L"b"))); +#endif +} + +TEST(BasicStringViewTest, U16StringHashWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_hash hash; + auto h = hash(u"a"); + EXPECT_EQ(h, hash(std::u16string_view(u"a"))); + EXPECT_EQ(h, hash(std::u16string(u"a"))); + EXPECT_NE(h, hash(std::u16string_view(u"b"))); + EXPECT_NE(h, hash(std::u16string(u"b"))); +#endif +} + +TEST(BasicStringViewTest, U16StringViewHashWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_hash hash; + auto h = hash(u"a"); + EXPECT_EQ(h, hash(std::u16string_view(u"a"))); + EXPECT_EQ(h, hash(std::u16string(u"a"))); + EXPECT_NE(h, hash(std::u16string_view(u"b"))); + EXPECT_NE(h, hash(std::u16string(u"b"))); +#endif +} + +TEST(BasicStringViewTest, U32StringHashWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_hash hash; + auto h = hash(U"a"); + EXPECT_EQ(h, hash(std::u32string_view(U"a"))); + EXPECT_EQ(h, hash(std::u32string(U"a"))); + EXPECT_NE(h, hash(std::u32string_view(U"b"))); + EXPECT_NE(h, hash(std::u32string(U"b"))); +#endif +} + +TEST(BasicStringViewTest, U32StringViewHashWorks) { +#ifndef ABSL_HAVE_STD_STRING_VIEW + GTEST_SKIP(); +#else + hash_default_hash hash; + auto h = hash(U"a"); + EXPECT_EQ(h, hash(std::u32string_view(U"a"))); + EXPECT_EQ(h, hash(std::u32string(U"a"))); + EXPECT_NE(h, hash(std::u32string_view(U"b"))); + EXPECT_NE(h, hash(std::u32string(U"b"))); +#endif +} + +struct NoDeleter { + template + void operator()(const T* ptr) const {} +}; + +using PointerTypes = + ::testing::Types, + std::unique_ptr, + std::unique_ptr, std::unique_ptr, + std::shared_ptr, std::shared_ptr>; + +template +struct EqPointer : ::testing::Test { + hash_default_eq key_eq; +}; + 
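An illustrative aside rather than part of the patch, before the typed pointer tests below: a sketch of what the pointer specialization of HashEq enables at the container level, assuming only the public absl::flat_hash_set API. A raw pointer can look up an entry owned by a unique_ptr because the transparent Hash and Eq normalize all pointer-like keys through ToPtr().

#include <memory>

#include "absl/container/flat_hash_set.h"

int main() {
  absl::flat_hash_set<std::unique_ptr<int>> s;
  auto owned = std::make_unique<int>(42);
  int* raw = owned.get();
  s.insert(std::move(owned));
  // Heterogeneous lookup through HashEq<T*>: no unique_ptr is constructed.
  return s.find(raw) != s.end() ? 0 : 1;
}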
+TYPED_TEST_SUITE(EqPointer, PointerTypes); + +template +struct HashPointer : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_SUITE(HashPointer, PointerTypes); + +TYPED_TEST(EqPointer, Works) { + int dummy; + auto eq = this->key_eq; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_TRUE(eq(ptr, cptr)); + EXPECT_TRUE(eq(ptr, sptr)); + EXPECT_TRUE(eq(ptr, uptr)); + EXPECT_TRUE(eq(ptr, csptr)); + EXPECT_TRUE(eq(ptr, cuptr)); + EXPECT_FALSE(eq(&dummy, cptr)); + EXPECT_FALSE(eq(&dummy, sptr)); + EXPECT_FALSE(eq(&dummy, uptr)); + EXPECT_FALSE(eq(&dummy, csptr)); + EXPECT_FALSE(eq(&dummy, cuptr)); +} + +TEST(Hash, DerivedAndBase) { + struct Base {}; + struct Derived : Base {}; + + hash_default_hash hasher; + + Base base; + Derived derived; + EXPECT_NE(hasher(&base), hasher(&derived)); + EXPECT_EQ(hasher(static_cast(&derived)), hasher(&derived)); + + auto dp = std::make_shared(); + EXPECT_EQ(hasher(static_cast(dp.get())), hasher(dp)); +} + +TEST(Hash, FunctionPointer) { + using Func = int (*)(); + hash_default_hash hasher; + hash_default_eq eq; + + Func p1 = [] { return 1; }, p2 = [] { return 2; }; + EXPECT_EQ(hasher(p1), hasher(p1)); + EXPECT_TRUE(eq(p1, p1)); + + EXPECT_NE(hasher(p1), hasher(p2)); + EXPECT_FALSE(eq(p1, p2)); +} + +TYPED_TEST(HashPointer, Works) { + int dummy; + auto hash = this->hasher; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_EQ(hash(ptr), hash(cptr)); + EXPECT_EQ(hash(ptr), hash(sptr)); + EXPECT_EQ(hash(ptr), hash(uptr)); + EXPECT_EQ(hash(ptr), hash(csptr)); + EXPECT_EQ(hash(ptr), hash(cuptr)); + EXPECT_NE(hash(&dummy), hash(cptr)); + EXPECT_NE(hash(&dummy), hash(sptr)); + EXPECT_NE(hash(&dummy), hash(uptr)); + EXPECT_NE(hash(&dummy), hash(csptr)); + EXPECT_NE(hash(&dummy), hash(cuptr)); +} + +TEST(EqCord, Works) { + hash_default_eq eq; + const absl::string_view a_string_view = "a"; + const absl::Cord a_cord(a_string_view); + const absl::string_view b_string_view = "b"; + const absl::Cord b_cord(b_string_view); + + EXPECT_TRUE(eq(a_cord, a_cord)); + EXPECT_TRUE(eq(a_cord, a_string_view)); + EXPECT_TRUE(eq(a_string_view, a_cord)); + EXPECT_FALSE(eq(a_cord, b_cord)); + EXPECT_FALSE(eq(a_cord, b_string_view)); + EXPECT_FALSE(eq(b_string_view, a_cord)); +} + +TEST(HashCord, Works) { + hash_default_hash hash; + const absl::string_view a_string_view = "a"; + const absl::Cord a_cord(a_string_view); + const absl::string_view b_string_view = "b"; + const absl::Cord b_cord(b_string_view); + + EXPECT_EQ(hash(a_cord), hash(a_cord)); + EXPECT_EQ(hash(b_cord), hash(b_cord)); + EXPECT_EQ(hash(a_string_view), hash(a_cord)); + EXPECT_EQ(hash(b_string_view), hash(b_cord)); + EXPECT_EQ(hash(absl::Cord("")), hash("")); + EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view())); + + EXPECT_NE(hash(a_cord), hash(b_cord)); + EXPECT_NE(hash(a_cord), hash(b_string_view)); + EXPECT_NE(hash(a_string_view), hash(b_cord)); + EXPECT_NE(hash(a_string_view), hash(b_string_view)); +} + +void NoOpReleaser(absl::string_view data, void* arg) {} + +TEST(HashCord, FragmentedCordWorks) { + hash_default_hash hash; + absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"}); + EXPECT_FALSE(c.TryFlat().has_value()); + EXPECT_EQ(hash(c), hash("abc")); +} + +TEST(HashCord, FragmentedLongCordWorks) { + hash_default_hash hash; + // 
Create some large strings which do not fit on the stack.
+  std::string a(65536, 'a');
+  std::string b(65536, 'b');
+  absl::Cord c = absl::MakeFragmentedCord({a, b});
+  EXPECT_FALSE(c.TryFlat().has_value());
+  EXPECT_EQ(hash(c), hash(a + b));
+}
+
+TEST(HashCord, RandomCord) {
+  hash_default_hash<absl::Cord> hash;
+  auto bitgen = absl::BitGen();
+  for (int i = 0; i < 1000; ++i) {
+    const size_t number_of_segments = absl::Uniform(bitgen, 0u, 10u);
+    std::vector<std::string> pieces;
+    for (size_t s = 0; s < number_of_segments; ++s) {
+      std::string str;
+      str.resize(absl::Uniform(bitgen, 0u, 4096u));
+      // MSVC needed the explicit return type in the lambda.
+      std::generate(str.begin(), str.end(), [&]() -> char {
+        return static_cast<char>(absl::Uniform<unsigned char>(bitgen));
+      });
+      pieces.push_back(str);
+    }
+    absl::Cord c = absl::MakeFragmentedCord(pieces);
+    EXPECT_EQ(hash(c), hash(std::string(c)));
+  }
+}
+
+// Cartesian product of (std::string, absl::string_view)
+// with (std::string, absl::string_view, const char*, absl::Cord).
+using StringTypesCartesianProduct = Types<
+    // clang-format off
+    std::pair,
+    std::pair,
+    std::pair,
+    std::pair,
+
+    std::pair,
+    std::pair,
+
+    std::pair,
+    std::pair,
+    std::pair>;
+// clang-format on
+
+constexpr char kFirstString[] = "abc123";
+constexpr char kSecondString[] = "ijk456";
+
+template <typename T>
+struct StringLikeTest : public ::testing::Test {
+  typename T::first_type a1{kFirstString};
+  typename T::second_type b1{kFirstString};
+  typename T::first_type a2{kSecondString};
+  typename T::second_type b2{kSecondString};
+  hash_default_eq<typename T::first_type> eq;
+  hash_default_hash<typename T::first_type> hash;
+};
+
+TYPED_TEST_SUITE_P(StringLikeTest);
+
+TYPED_TEST_P(StringLikeTest, Eq) {
+  EXPECT_TRUE(this->eq(this->a1, this->b1));
+  EXPECT_TRUE(this->eq(this->b1, this->a1));
+}
+
+TYPED_TEST_P(StringLikeTest, NotEq) {
+  EXPECT_FALSE(this->eq(this->a1, this->b2));
+  EXPECT_FALSE(this->eq(this->b2, this->a1));
+}
+
+TYPED_TEST_P(StringLikeTest, HashEq) {
+  EXPECT_EQ(this->hash(this->a1), this->hash(this->b1));
+  EXPECT_EQ(this->hash(this->a2), this->hash(this->b2));
+  // It would be a poor hash function which collides on these strings.
+  EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
+}
+
+// TYPED_TEST_P suites must be registered and instantiated to run.
+REGISTER_TYPED_TEST_SUITE_P(StringLikeTest, Eq, NotEq, HashEq);
+INSTANTIATE_TYPED_TEST_SUITE_P(StringLike, StringLikeTest,
+                               StringTypesCartesianProduct);
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+enum Hash : size_t {
+  kStd = 0x1,  // std::hash
+#ifdef _MSC_VER
+  kExtension = kStd,  // In MSVC, std::hash == ::hash
+#else   // _MSC_VER
+  kExtension = 0x2,  // ::hash (GCC extension)
+#endif  // _MSC_VER
+};
+
+// H is a bitmask of Hash enumerations.
+// Hashable<H> is hashable via all means specified in H.
+template +struct Hashable { + static constexpr bool HashableBy(Hash h) { return h & H; } +}; + +namespace std { +template +struct hash> { + template , + class = typename std::enable_if::type> + size_t operator()(E) const { + return kStd; + } +}; +} // namespace std + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +template +size_t Hash(const T& v) { + return hash_default_hash()(v); +} + +TEST(Delegate, HashDispatch) { + EXPECT_EQ(Hash(kStd), Hash(Hashable())); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/hash_generator_testing.cc b/third_party/absl/absl/container/internal/hash_generator_testing.cc new file mode 100644 index 000000000..e89dfdb5b --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_generator_testing.cc @@ -0,0 +1,78 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hash_generator_testing.h" + +#include + +#include "absl/base/no_destructor.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hash_internal { +namespace { + +class RandomDeviceSeedSeq { + public: + using result_type = typename std::random_device::result_type; + + template + void generate(Iterator start, Iterator end) { + while (start != end) { + *start = gen_(); + ++start; + } + } + + private: + std::random_device gen_; +}; + +} // namespace + +std::mt19937_64* GetSharedRng() { + static absl::NoDestructor rng([] { + RandomDeviceSeedSeq seed_seq; + return std::mt19937_64(seed_seq); + }()); + return rng.get(); +} + +std::string Generator::operator()() const { + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution chars(0x20, 0x7E); + std::string res; + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetSharedRng()); }); + return res; +} + +absl::string_view Generator::operator()() const { + static absl::NoDestructor> arena; + // NOLINTNEXTLINE(runtime/int) + std::uniform_int_distribution chars(0x20, 0x7E); + arena->emplace_back(); + auto& res = arena->back(); + res.resize(32); + std::generate(res.begin(), res.end(), + [&]() { return chars(*GetSharedRng()); }); + return res; +} + +} // namespace hash_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/hash_generator_testing.h b/third_party/absl/absl/container/internal/hash_generator_testing.h new file mode 100644 index 000000000..f1f555a5c --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_generator_testing.h @@ -0,0 +1,182 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates random values for testing. Specialized only for the few types we +// care about. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hash_internal { +namespace generator_internal { + +template +struct IsMap : std::false_type {}; + +template +struct IsMap> : std::true_type {}; + +} // namespace generator_internal + +std::mt19937_64* GetSharedRng(); + +enum Enum { + kEnumEmpty, + kEnumDeleted, +}; + +enum class EnumClass : uint64_t { + kEmpty, + kDeleted, +}; + +inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { + return o << static_cast(ec); +} + +template +struct Generator; + +template +struct Generator::value>::type> { + T operator()() const { + std::uniform_int_distribution dist; + return dist(*GetSharedRng()); + } +}; + +template <> +struct Generator { + Enum operator()() const { + std::uniform_int_distribution::type> + dist; + while (true) { + auto variate = dist(*GetSharedRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + EnumClass operator()() const { + std::uniform_int_distribution< + typename std::underlying_type::type> + dist; + while (true) { + EnumClass variate = static_cast(dist(*GetSharedRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + std::string operator()() const; +}; + +template <> +struct Generator { + absl::string_view operator()() const; +}; + +template <> +struct Generator { + NonStandardLayout operator()() const { + return NonStandardLayout(Generator()()); + } +}; + +template +struct Generator> { + std::pair operator()() const { + return std::pair(Generator::type>()(), + Generator::type>()()); + } +}; + +template +struct Generator> { + std::tuple operator()() const { + return std::tuple(Generator::type>()()...); + } +}; + +template +struct Generator> { + std::unique_ptr operator()() const { + return absl::make_unique(Generator()()); + } +}; + +template +struct Generator().key()), + decltype(std::declval().value())>> + : Generator().key())>::type, + typename std::decay().value())>::type>> {}; + +template +using GeneratedType = decltype( + std::declval::value, + typename Container::value_type, + typename Container::key_type>::type>&>()()); + +// Naive wrapper that performs a linear search of previous values. +// Beware this is O(SQR), which is reasonable for smaller kMaxValues. 
+template +struct UniqueGenerator { + Generator gen; + std::vector values; + + T operator()() { + assert(values.size() < kMaxValues); + for (;;) { + T value = gen(); + if (std::find(values.begin(), values.end(), value) == values.end()) { + values.push_back(value); + return value; + } + } + } +}; + +} // namespace hash_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/third_party/absl/absl/container/internal/hash_policy_testing.h b/third_party/absl/absl/container/internal/hash_policy_testing.h new file mode 100644 index 000000000..01c40d2e5 --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_policy_testing.h @@ -0,0 +1,184 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Utilities to help tests verify that hash tables properly handle stateful +// allocators and hash functions. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hash_testing_internal { + +template +struct WithId { + WithId() : id_(next_id()) {} + WithId(const WithId& that) : id_(that.id_) {} + WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } + WithId& operator=(const WithId& that) { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const { return id_; } + + friend bool operator==(const WithId& a, const WithId& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } + + protected: + explicit WithId(size_t id) : id_(id) {} + + private: + size_t id_; + + template + static size_t next_id() { + // 0 is reserved for moved from state. 
+ static size_t gId = 1; + return gId++; + } +}; + +} // namespace hash_testing_internal + +struct NonStandardLayout { + NonStandardLayout() {} + explicit NonStandardLayout(std::string s) : value(std::move(s)) {} + virtual ~NonStandardLayout() {} + + friend bool operator==(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value != b.value; + } + + template + friend H AbslHashValue(H h, const NonStandardLayout& v) { + return H::combine(std::move(h), v.value); + } + + std::string value; +}; + +struct StatefulTestingHash + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingHash> { + template + size_t operator()(const T& t) const { + return absl::Hash{}(t); + } +}; + +struct StatefulTestingEqual + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingEqual> { + template + bool operator()(const T& t, const U& u) const { + return t == u; + } +}; + +// It is expected that Alloc() == Alloc() for all allocators so we cannot use +// WithId base. We need to explicitly assign ids. +template +struct Alloc : std::allocator { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. + explicit Alloc(size_t id = 0) : id_(id) {} + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template + Alloc(const Alloc& that) : std::allocator(that), id_(that.id()) {} + + template + struct rebind { + using other = Alloc; + }; + + size_t id() const { return id_; } + + friend bool operator==(const Alloc& a, const Alloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } + + private: + size_t id_ = (std::numeric_limits::max)(); +}; + +template +auto items(const Map& m) -> std::vector< + std::pair> { + using std::get; + std::vector> res; + res.reserve(m.size()); + for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); + return res; +} + +template +auto keys(const Set& s) + -> std::vector::type> { + std::vector::type> res; + res.reserve(s.size()); + for (const auto& v : s) res.emplace_back(v); + return res; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions +// where the unordered containers are missing certain constructors that +// take allocator arguments. This test is defined ad-hoc for the platforms +// we care about (notably Crosstool 17) because libstdcxx's useless +// versioning scheme precludes a more principled solution. +// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) +// "the unordered associative containers in and +// meet the allocator-aware container requirements;" +#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \ +( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 )) +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 +#else +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ diff --git a/third_party/absl/absl/container/internal/hash_policy_testing_test.cc b/third_party/absl/absl/container/internal/hash_policy_testing_test.cc new file mode 100644 index 000000000..f0b20fe34 --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_policy_testing_test.cc @@ -0,0 +1,45 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hash_policy_testing.h" + +#include "gtest/gtest.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +TEST(_, Hash) { + StatefulTestingHash h1; + EXPECT_EQ(1, h1.id()); + StatefulTestingHash h2; + EXPECT_EQ(2, h2.id()); + StatefulTestingHash h1c(h1); + EXPECT_EQ(1, h1c.id()); + StatefulTestingHash h2m(std::move(h2)); + EXPECT_EQ(2, h2m.id()); + EXPECT_EQ(0, h2.id()); + StatefulTestingHash h3; + EXPECT_EQ(3, h3.id()); + h3 = StatefulTestingHash(); + EXPECT_EQ(4, h3.id()); + h3 = std::move(h1); + EXPECT_EQ(1, h3.id()); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/hash_policy_traits.h b/third_party/absl/absl/container/internal/hash_policy_traits.h new file mode 100644 index 000000000..164ec1231 --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_policy_traits.h @@ -0,0 +1,157 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ + +#include +#include +#include +#include +#include + +#include "absl/container/internal/common_policy_traits.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template +struct hash_policy_traits : common_policy_traits { + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + + private: + struct ReturnKey { + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + template ::value, int> = 0> + static key_type& Impl(Key&& k, int) { + return *std::launder( + const_cast(std::addressof(std::forward(k)))); + } +#endif + + template + static Key Impl(Key&& k, char) { + return std::forward(k); + } + + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template + auto operator()(Key&& k, const Args&...) 
const + -> decltype(Impl(std::forward(k), 0)) { + return Impl(std::forward(k), 0); + } + }; + + template + struct ConstantIteratorsImpl : std::false_type {}; + + template + struct ConstantIteratorsImpl> + : P::constant_iterators {}; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. + using init_type = typename Policy::init_type; + + using reference = decltype(Policy::element(std::declval())); + using pointer = typename std::remove_reference::type*; + using value_type = typename std::remove_reference::type; + + // Policies can set this variable to tell raw_hash_set that all iterators + // should be constant, even `iterator`. This is useful for set-like + // containers. + // Defaults to false if not provided by the policy. + using constant_iterators = ConstantIteratorsImpl<>; + + // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. + // + // If `slot` is nullptr, returns the constant amount of memory owned by any + // full slot or -1 if slots own variable amounts of memory. + // + // PRECONDITION: `slot` is INITIALIZED or nullptr + template + static size_t space_used(const slot_type* slot) { + return P::space_used(slot); + } + + // Provides generalized access to the key for elements, both for elements in + // the table and for elements that have not yet been inserted (or even + // constructed). We would like an API that allows us to say: `key(args...)` + // but we cannot do that for all cases, so we use this more general API that + // can be used for many things, including the following: + // + // - Given an element in a table, get its key. + // - Given an element initializer, get its key. + // - Given `emplace()` arguments, get the element key. + // + // Implementations of this must adhere to a very strict technical + // specification around aliasing and consuming arguments: + // + // Let `value_type` be the result type of `element()` without ref- and + // cv-qualifiers. The first argument is a functor, the rest are constructor + // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where + // `k` is the element key, and `xs...` are the new constructor arguments for + // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias + // `ts...`. The key won't be touched once `xs...` are used to construct an + // element; `ts...` won't be touched at all, which allows `apply()` to consume + // any rvalues among them. + // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. + template + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward(f), std::forward(ts)...)) { + return P::apply(std::forward(f), std::forward(ts)...); + } + + // Returns the "key" portion of the slot. + // Used for node handle manipulation. 
+ template + static auto mutable_key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), hash_policy_traits::element(slot))) { + return P::apply(ReturnKey(), hash_policy_traits::element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/third_party/absl/absl/container/internal/hash_policy_traits_test.cc b/third_party/absl/absl/container/internal/hash_policy_traits_test.cc new file mode 100644 index 000000000..82d7cc3a7 --- /dev/null +++ b/third_party/absl/absl/container/internal/hash_policy_traits_test.cc @@ -0,0 +1,80 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hash_policy_traits.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::MockFunction; +using ::testing::Return; +using ::testing::ReturnRef; + +using Alloc = std::allocator; +using Slot = int; + +struct PolicyWithoutOptionalOps { + using slot_type = Slot; + using key_type = Slot; + using init_type = Slot; + + static std::function element; + static int apply(int v) { return apply_impl(v); } + static std::function apply_impl; + static std::function value; +}; + +std::function PolicyWithoutOptionalOps::apply_impl; +std::function PolicyWithoutOptionalOps::value; + +struct Test : ::testing::Test { + Test() { + PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int { + return apply.Call(a1); + }; + PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& { + return value.Call(a1); + }; + } + + std::allocator alloc; + int a = 53; + MockFunction apply; + MockFunction value; +}; + +TEST_F(Test, apply) { + EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337)); + EXPECT_EQ(1337, (hash_policy_traits::apply(42))); +} + +TEST_F(Test, value) { + int b = 0; + EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b)); + EXPECT_EQ(&b, &hash_policy_traits::value(&a)); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/hashtable_debug.h b/third_party/absl/absl/container/internal/hashtable_debug.h new file mode 100644 index 000000000..c79c1a989 --- /dev/null +++ b/third_party/absl/absl/container/internal/hashtable_debug.h @@ -0,0 +1,102 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse into the actual behavior of the probing
+// algorithms in these hashtables given a specified hash function and a set
+// of elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// absl::{flat,node,string}_hash_{set,map}.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/container/internal/hashtable_debug_hooks.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Returns the number of probes required to look up `key`. Returns 0 for a
+// search with no collisions. Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+    const C& c, const typename C::key_type& key) {
+  return absl::container_internal::hashtable_debug_internal::
+      HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+  std::vector<size_t> v;
+  for (auto it = container.begin(); it != container.end(); ++it) {
+    size_t num_probes = GetHashtableDebugNumProbes(
+        container,
+        absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+    v.resize((std::max)(v.size(), num_probes + 1));
+    v[num_probes]++;
+  }
+  return v;
+}
+
+struct HashtableDebugProbeSummary {
+  size_t total_elements;
+  size_t total_num_probes;
+  double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+  auto probes = GetHashtableDebugNumProbesHistogram(container);
+  HashtableDebugProbeSummary summary = {};
+  for (size_t i = 0; i < probes.size(); ++i) {
+    summary.total_elements += probes[i];
+    summary.total_num_probes += probes[i] * i;
+  }
+  summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+  return summary;
+}
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template +size_t AllocatedByteSize(const C& c) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::AllocatedByteSize(c); +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/third_party/absl/absl/container/internal/hashtable_debug_hooks.h b/third_party/absl/absl/container/internal/hashtable_debug_hooks.h new file mode 100644 index 000000000..3e9ea5954 --- /dev/null +++ b/third_party/absl/absl/container/internal/hashtable_debug_hooks.h @@ -0,0 +1,85 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Provides the internal API for hashtable_debug.h. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ + +#include + +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hashtable_debug_internal { + +// If it is a map, call get<0>(). +using std::get; +template +auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { + return get<0>(pair); +} + +// If it is not a map, return the value directly. +template +const typename T::key_type& GetKey(const typename T::key_type& key, char) { + return key; +} + +// Containers should specialize this to provide debug information for that +// container. +template +struct HashtableDebugAccess { + // Returns the number of probes required to find `key` in `c`. The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. + static size_t GetNumProbes(const Container& c, + const typename Container::key_type& key) { + if (!c.bucket_count()) return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { + if (it == e) return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; + } + } + + // Returns the number of bytes requested from the allocator by the container + // and not freed. + // + // static size_t AllocatedByteSize(const Container& c); + + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type + // `Container` and `c.size()` is equal to `num_elements`. 
diff --git a/third_party/absl/absl/container/internal/hashtablez_sampler.cc b/third_party/absl/absl/container/internal/hashtablez_sampler.cc
new file mode 100644
index 000000000..79a0973a6
--- /dev/null
+++ b/third_party/absl/absl/container/internal/hashtablez_sampler.cc
@@ -0,0 +1,285 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/memory/memory.h"
+#include "absl/profiling/internal/exponential_biased.h"
+#include "absl/profiling/internal/sample_recorder.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/time/clock.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int HashtablezInfo::kMaxStackDepth;
+#endif
+
+namespace {
+ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
+    false
+};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
+    g_exponential_biased_generator;
+#endif
+
+void TriggerHashtablezConfigListener() {
+  auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
+  if (listener != nullptr) listener();
+}
+
+}  // namespace
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+HashtablezSampler& GlobalHashtablezSampler() {
+  static auto* sampler = new HashtablezSampler();
+  return *sampler;
+}
+
+HashtablezInfo::HashtablezInfo() = default;
+HashtablezInfo::~HashtablezInfo() = default;
+
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+                                        size_t inline_element_size_value) {
+  capacity.store(0, std::memory_order_relaxed);
+  size.store(0, std::memory_order_relaxed);
+  num_erases.store(0, std::memory_order_relaxed);
+  num_rehashes.store(0, std::memory_order_relaxed);
+  max_probe_length.store(0, std::memory_order_relaxed);
+  total_probe_length.store(0, std::memory_order_relaxed);
+  hashes_bitwise_or.store(0, std::memory_order_relaxed);
+  hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+  hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+  max_reserve.store(0, std::memory_order_relaxed);
+
+  create_time = absl::Now();
+  weight = stride;
+  // The inliner makes hardcoded skip_count difficult (especially when combined
+  // with LTO). We use the ability to exclude stacks by regex when encoding
+  // instead.
+  depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
+                              /* skip_count= */ 0);
+  inline_element_size = inline_element_size_value;
+}
+
+static bool ShouldForceSampling() {
+  enum ForceState {
+    kDontForce,
+    kForce,
+    kUninitialized
+  };
+  ABSL_CONST_INIT static std::atomic<ForceState> global_state{
+      kUninitialized};
+  ForceState state = global_state.load(std::memory_order_relaxed);
+  if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
+
+  if (state == kUninitialized) {
+    state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
+                ? kForce
+                : kDontForce;
+    global_state.store(state, std::memory_order_relaxed);
+  }
+  return state == kForce;
+}
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size) {
+  if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
+    next_sample.next_sample = 1;
+    const int64_t old_stride = exchange(next_sample.sample_stride, 1);
+    HashtablezInfo* result =
+        GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+    return result;
+  }
+
+#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+  next_sample = {
+      std::numeric_limits<int64_t>::max(),
+      std::numeric_limits<int64_t>::max(),
+  };
+  return nullptr;
+#else
+  bool first = next_sample.next_sample < 0;
+
+  const int64_t next_stride = g_exponential_biased_generator.GetStride(
+      g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+  next_sample.next_sample = next_stride;
+  const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
+  // Small values of the interval are equivalent to just sampling next time.
+  ABSL_ASSERT(next_stride >= 1);
+
+  // g_hashtablez_enabled can be dynamically flipped, so we need to set a
+  // threshold low enough that we will start sampling in a reasonable time;
+  // we just use the default sampling rate.
+  if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
+
+  // We will only be negative on our first count, so we should just retry in
+  // that case.
+  if (first) {
+    if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
+    return SampleSlow(next_sample, inline_element_size);
+  }
+
+  return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+#endif
+}
+
+void UnsampleSlow(HashtablezInfo* info) {
+  GlobalHashtablezSampler().Unregister(info);
+}
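
For orientation, the countdown consumed by the fast path in the header is
re-armed here with a per-thread stride drawn from an exponentially biased
distribution, so on average one in `g_hashtablez_sample_parameter` tables is
sampled. A simplified stand-alone model of that scheme (illustrative only; it
substitutes std::geometric_distribution for absl's ExponentialBiased):

    #include <cstdint>
    #include <cstdio>
    #include <random>

    // Per-thread countdown; the real code keeps this in SamplingState
    // together with the stride used for weighting.
    thread_local int64_t next_sample = 0;

    bool ShouldSample(std::mt19937_64& rng) {
      if (--next_sample > 0) return false;  // fast path: one decrement
      // Re-arm with a random stride whose mean matches the sample parameter.
      std::geometric_distribution<int64_t> stride(1.0 / 1024);
      next_sample = stride(rng) + 1;
      return true;  // slow path: a sample would be registered here
    }

    int main() {
      std::mt19937_64 rng(42);
      int64_t sampled = 0;
      const int64_t total = 1000000;
      for (int64_t i = 0; i < total; ++i) sampled += ShouldSample(rng);
      std::printf("sampled %lld of %lld (expect ~1/1024)\n",
                  static_cast<long long>(sampled),
                  static_cast<long long>(total));
    }
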
+void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+  total_probe_length /= 16;
+#else
+  total_probe_length /= 8;
+#endif
+  info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+  info->num_erases.store(0, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_rehashes.store(
+      1 + info->num_rehashes.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
+}
+
+void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) {
+  info->max_reserve.store(
+      (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+                 target_capacity),
+      std::memory_order_relaxed);
+}
+
+void RecordClearedReservationSlow(HashtablezInfo* info) {
+  info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
+void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+                              size_t capacity) {
+  info->size.store(size, std::memory_order_relaxed);
+  info->capacity.store(capacity, std::memory_order_relaxed);
+  if (size == 0) {
+    // This is a clear, so reset the total/num_erases too.
+    info->total_probe_length.store(0, std::memory_order_relaxed);
+    info->num_erases.store(0, std::memory_order_relaxed);
+  }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+                      size_t distance_from_desired) {
+  // SwissTables probe in groups of 16, so scale this to count item probes
+  // rather than the offset from the desired slot.
+  size_t probe_length = distance_from_desired;
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+  probe_length /= 16;
+#else
+  probe_length /= 8;
+#endif
+
+  info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
+  info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+  info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
+  info->max_probe_length.store(
+      std::max(info->max_probe_length.load(std::memory_order_relaxed),
+               probe_length),
+      std::memory_order_relaxed);
+  info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
+  info->size.fetch_add(1, std::memory_order_relaxed);
+}
+
+void RecordEraseSlow(HashtablezInfo* info) {
+  info->size.fetch_sub(1, std::memory_order_relaxed);
+  // There is only one concurrent writer, so `load` then `store` is sufficient
+  // instead of using `fetch_add`.
+  info->num_erases.store(1 + info->num_erases.load(std::memory_order_relaxed),
+                         std::memory_order_relaxed);
+}
+
+void SetHashtablezConfigListener(HashtablezConfigListener l) {
+  g_hashtablez_config_listener.store(l, std::memory_order_release);
+}
+
+bool IsHashtablezEnabled() {
+  return g_hashtablez_enabled.load(std::memory_order_acquire);
+}
+
+void SetHashtablezEnabled(bool enabled) {
+  SetHashtablezEnabledInternal(enabled);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezEnabledInternal(bool enabled) {
+  g_hashtablez_enabled.store(enabled, std::memory_order_release);
+}
+
+int32_t GetHashtablezSampleParameter() {
+  return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
+}
+
+void SetHashtablezSampleParameter(int32_t rate) {
+  SetHashtablezSampleParameterInternal(rate);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezSampleParameterInternal(int32_t rate) {
+  if (rate > 0) {
+    g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
+  } else {
+    ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
+                 static_cast<long long>(rate));  // NOLINT(runtime/int)
+  }
+}
+
+size_t GetHashtablezMaxSamples() {
+  return GlobalHashtablezSampler().GetMaxSamples();
+}
+
+void SetHashtablezMaxSamples(size_t max) {
+  SetHashtablezMaxSamplesInternal(max);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezMaxSamplesInternal(size_t max) {
+  if (max > 0) {
+    GlobalHashtablezSampler().SetMaxSamples(max);
+  } else {
+    ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0");
+  }
+}
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
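
A consumer would typically flip these knobs once at startup and walk the
samples later. A hypothetical sketch (samples only appear in builds where
ABSL_INTERNAL_HASHTABLEZ_SAMPLE is active):

    #include <atomic>
    #include <cstdio>

    #include "absl/container/internal/hashtablez_sampler.h"

    void EnableTableProfiling() {
      using namespace absl::container_internal;
      SetHashtablezEnabled(true);
      SetHashtablezSampleParameter(1 << 10);  // mean sampling stride
      SetHashtablezMaxSamples(1 << 16);       // soft cap on live samples
    }

    void DumpTableProfiles() {
      absl::container_internal::GlobalHashtablezSampler().Iterate(
          [](const absl::container_internal::HashtablezInfo& info) {
            std::printf("size=%zu capacity=%zu\n",
                        info.size.load(std::memory_order_relaxed),
                        info.capacity.load(std::memory_order_relaxed));
          });
    }
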
diff --git a/third_party/absl/absl/container/internal/hashtablez_sampler.h b/third_party/absl/absl/container/internal/hashtablez_sampler.h
new file mode 100644
index 000000000..e41ee2d74
--- /dev/null
+++ b/third_party/absl/absl/container/internal/hashtablez_sampler.h
@@ -0,0 +1,257 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hashtablez_sampler.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for a low-level library to sample hashtables
+// and collect runtime statistics about them.
+//
+// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
+// store information about a single sample.
+//
+// `Record*` methods store information into samples.
+// `Sample()` and `Unsample()` make use of a single global sampler with
+// properties controlled by the flags hashtablez_enabled,
+// hashtablez_sample_rate, and hashtablez_max_samples.
+//
+// WARNING
+//
+// Using this sampling API may cause sampled Swiss tables to use the global
+// allocator (operator `new`) in addition to any custom allocator. If you
+// are using a table in an unusual circumstance where allocation or calling a
+// Linux syscall is unacceptable, this could interfere.
+//
+// This utility is internal-only. Use at your own risk.

+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+#include "absl/profiling/internal/sample_recorder.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Stores information about a sampled hashtable. All mutations to this *must*
+// be made through `Record*` functions below. All reads from this *must* only
+// occur in the callback to `HashtablezSampler::Iterate`.
+struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
+  // Constructs the object but does not fill in any fields.
+  HashtablezInfo();
+  ~HashtablezInfo();
+  HashtablezInfo(const HashtablezInfo&) = delete;
+  HashtablezInfo& operator=(const HashtablezInfo&) = delete;
+
+  // Puts the object into a clean state, fills in the logically `const` members,
+  // blocking for any readers that are currently sampling the object.
+  void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+
+  // These fields are mutated by the various Record* APIs and need to be
+  // thread-safe.
+  std::atomic<size_t> capacity;
+  std::atomic<size_t> size;
+  std::atomic<size_t> num_erases;
+  std::atomic<size_t> num_rehashes;
+  std::atomic<size_t> max_probe_length;
+  std::atomic<size_t> total_probe_length;
+  std::atomic<size_t> hashes_bitwise_or;
+  std::atomic<size_t> hashes_bitwise_and;
+  std::atomic<size_t> hashes_bitwise_xor;
+  std::atomic<size_t> max_reserve;
+
+  // All of the fields below are set by `PrepareForSampling`; they must not be
+  // mutated in `Record*` functions. They are logically `const` in that sense.
+  // These are guarded by init_mu, but that is not externalized to clients,
+  // which can read them only during `SampleRecorder::Iterate` which will hold
+  // the lock.
+  static constexpr int kMaxStackDepth = 64;
+  absl::Time create_time;
+  int32_t depth;
+  void* stack[kMaxStackDepth];
+  size_t inline_element_size;  // How big is the slot?
+};
+
+void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length);
+
+void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity);
+
+void RecordClearedReservationSlow(HashtablezInfo* info);
+
+void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+                              size_t capacity);
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+                      size_t distance_from_desired);
+
+void RecordEraseSlow(HashtablezInfo* info);
+
+struct SamplingState {
+  int64_t next_sample;
+  // When we make a sampling decision, we record that distance so we can weight
+  // each sample.
+  int64_t sample_stride;
+};
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size);
+void UnsampleSlow(HashtablezInfo* info);
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
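
The `sample_stride` recorded above is what makes the samples statistically
useful: a table sampled with stride `w` stands in for roughly `w` unsampled
tables, so population totals can be estimated by summing weights. A
hypothetical aggregation over the global sampler:

    #include <cstddef>
    #include <cstdint>

    #include "absl/container/internal/hashtablez_sampler.h"

    // Each sample was taken once per `weight` table constructions on average,
    // so summing the weights estimates how many tables were created in total.
    size_t EstimateTotalTables() {
      size_t estimate = 0;
      absl::container_internal::GlobalHashtablezSampler().Iterate(
          [&](const absl::container_internal::HashtablezInfo& info) {
            estimate += static_cast<size_t>(info.weight);
          });
      return estimate;
    }
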
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+class HashtablezInfoHandle {
+ public:
+  explicit HashtablezInfoHandle() : info_(nullptr) {}
+  explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+
+  // We do not have a destructor. The caller is responsible for calling
+  // Unregister before destroying the handle.
+  void Unregister() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    UnsampleSlow(info_);
+  }
+
+  inline bool IsSampled() const { return ABSL_PREDICT_FALSE(info_ != nullptr); }
+
+  inline void RecordStorageChanged(size_t size, size_t capacity) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordStorageChangedSlow(info_, size, capacity);
+  }
+
+  inline void RecordRehash(size_t total_probe_length) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordRehashSlow(info_, total_probe_length);
+  }
+
+  inline void RecordReservation(size_t target_capacity) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordReservationSlow(info_, target_capacity);
+  }
+
+  inline void RecordClearedReservation() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordClearedReservationSlow(info_);
+  }
+
+  inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordInsertSlow(info_, hash, distance_from_desired);
+  }
+
+  inline void RecordErase() {
+    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+    RecordEraseSlow(info_);
+  }
+
+  friend inline void swap(HashtablezInfoHandle& lhs,
+                          HashtablezInfoHandle& rhs) {
+    std::swap(lhs.info_, rhs.info_);
+  }
+
+ private:
+  friend class HashtablezInfoHandlePeer;
+  HashtablezInfo* info_;
+};
+#else
+// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo
+// can be removed by the linker, in order to reduce the binary size.
+class HashtablezInfoHandle {
+ public:
+  explicit HashtablezInfoHandle() = default;
+  explicit HashtablezInfoHandle(std::nullptr_t) {}
+
+  inline void Unregister() {}
+  inline bool IsSampled() const { return false; }
+  inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
+  inline void RecordRehash(size_t /*total_probe_length*/) {}
+  inline void RecordReservation(size_t /*target_capacity*/) {}
+  inline void RecordClearedReservation() {}
+  inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
+  inline void RecordErase() {}
+
+  friend inline void swap(HashtablezInfoHandle& /*lhs*/,
+                          HashtablezInfoHandle& /*rhs*/) {}
+};
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+// Returns a sampling handle that manages registration and unregistration
+// with the global sampler.
+inline HashtablezInfoHandle Sample(
+    size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+  if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
+    return HashtablezInfoHandle(nullptr);
+  }
+  return HashtablezInfoHandle(
+      SampleSlow(global_next_sample, inline_element_size));
+#else
+  return HashtablezInfoHandle(nullptr);
+#endif  // !ABSL_PER_THREAD_TLS
+}
+
+using HashtablezSampler =
+    ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
+
+// Returns the global sampler.
+HashtablezSampler& GlobalHashtablezSampler();
+
+using HashtablezConfigListener = void (*)();
+void SetHashtablezConfigListener(HashtablezConfigListener l);
+
+// Enables or disables sampling for Swiss tables.
+bool IsHashtablezEnabled();
+void SetHashtablezEnabled(bool enabled);
+void SetHashtablezEnabledInternal(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+int32_t GetHashtablezSampleParameter(); +void SetHashtablezSampleParameter(int32_t rate); +void SetHashtablezSampleParameterInternal(int32_t rate); + +// Sets a soft max for the number of samples that will be kept. +size_t GetHashtablezMaxSamples(); +void SetHashtablezMaxSamples(size_t max); +void SetHashtablezMaxSamplesInternal(size_t max); + +// Configuration override. +// This allows process-wide sampling without depending on order of +// initialization of static storage duration objects. +// The definition of this constant is weak, which allows us to inject a +// different value for it at link time. +extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ diff --git a/third_party/absl/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/third_party/absl/absl/container/internal/hashtablez_sampler_force_weak_definition.cc new file mode 100644 index 000000000..ed35a7eec --- /dev/null +++ b/third_party/absl/absl/container/internal/hashtablez_sampler_force_weak_definition.cc @@ -0,0 +1,31 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/hashtablez_sampler.h" + +#include "absl/base/attributes.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// See hashtablez_sampler.h for details. +extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL( + AbslContainerInternalSampleEverything)() { + return false; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/hashtablez_sampler_test.cc b/third_party/absl/absl/container/internal/hashtablez_sampler_test.cc new file mode 100644 index 000000000..8ebb08da4 --- /dev/null +++ b/third_party/absl/absl/container/internal/hashtablez_sampler_test.cc @@ -0,0 +1,424 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
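
The weak definition above is what lets a binary force-enable sampling at link
time with no runtime configuration: any strong definition wins over the
ABSL_ATTRIBUTE_WEAK default. A hypothetical override (assuming
ABSL_INTERNAL_C_SYMBOL is available via absl/base/config.h):

    #include "absl/base/config.h"

    // A strong definition anywhere in the link overrides the weak default of
    // `false`, forcing every table to be sampled.
    extern "C" bool ABSL_INTERNAL_C_SYMBOL(
        AbslContainerInternalSampleEverything)() {
      return true;
    }
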
+ +#include "absl/container/internal/hashtablez_sampler.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/profiling/internal/sample_recorder.h" +#include "absl/synchronization/blocking_counter.h" +#include "absl/synchronization/internal/thread_pool.h" +#include "absl/synchronization/mutex.h" +#include "absl/synchronization/notification.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" + +#ifdef ABSL_INTERNAL_HAVE_SSE2 +constexpr int kProbeLength = 16; +#else +constexpr int kProbeLength = 8; +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +class HashtablezInfoHandlePeer { + public: + static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; } +}; +#else +class HashtablezInfoHandlePeer { + public: + static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; } +}; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +namespace { +using ::absl::synchronization_internal::ThreadPool; +using ::testing::IsEmpty; +using ::testing::UnorderedElementsAre; + +std::vector GetSizes(HashtablezSampler* s) { + std::vector res; + s->Iterate([&](const HashtablezInfo& info) { + res.push_back(info.size.load(std::memory_order_acquire)); + }); + return res; +} + +HashtablezInfo* Register(HashtablezSampler* s, size_t size) { + const int64_t test_stride = 123; + const size_t test_element_size = 17; + auto* info = s->Register(test_stride, test_element_size); + assert(info != nullptr); + info->size.store(size); + return info; +} + +TEST(HashtablezInfoTest, PrepareForSampling) { + absl::Time test_start = absl::Now(); + const int64_t test_stride = 123; + const size_t test_element_size = 17; + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + info.PrepareForSampling(test_stride, test_element_size); + + EXPECT_EQ(info.capacity.load(), 0); + EXPECT_EQ(info.size.load(), 0); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_rehashes.load(), 0); + EXPECT_EQ(info.max_probe_length.load(), 0); + EXPECT_EQ(info.total_probe_length.load(), 0); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0); + EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); + EXPECT_EQ(info.max_reserve.load(), 0); + EXPECT_GE(info.create_time, test_start); + EXPECT_EQ(info.weight, test_stride); + EXPECT_EQ(info.inline_element_size, test_element_size); + + info.capacity.store(1, std::memory_order_relaxed); + info.size.store(1, std::memory_order_relaxed); + info.num_erases.store(1, std::memory_order_relaxed); + info.max_probe_length.store(1, std::memory_order_relaxed); + info.total_probe_length.store(1, std::memory_order_relaxed); + info.hashes_bitwise_or.store(1, std::memory_order_relaxed); + info.hashes_bitwise_and.store(1, std::memory_order_relaxed); + info.hashes_bitwise_xor.store(1, std::memory_order_relaxed); + info.max_reserve.store(1, std::memory_order_relaxed); + info.create_time = test_start - absl::Hours(20); + + info.PrepareForSampling(test_stride * 2, test_element_size); + EXPECT_EQ(info.capacity.load(), 0); + EXPECT_EQ(info.size.load(), 0); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_rehashes.load(), 0); + EXPECT_EQ(info.max_probe_length.load(), 0); + EXPECT_EQ(info.total_probe_length.load(), 0); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0); + EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); + 
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); + EXPECT_EQ(info.max_reserve.load(), 0); + EXPECT_EQ(info.weight, 2 * test_stride); + EXPECT_EQ(info.inline_element_size, test_element_size); + EXPECT_GE(info.create_time, test_start); +} + +TEST(HashtablezInfoTest, RecordStorageChanged) { + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + const int64_t test_stride = 21; + const size_t test_element_size = 19; + info.PrepareForSampling(test_stride, test_element_size); + RecordStorageChangedSlow(&info, 17, 47); + EXPECT_EQ(info.size.load(), 17); + EXPECT_EQ(info.capacity.load(), 47); + RecordStorageChangedSlow(&info, 20, 20); + EXPECT_EQ(info.size.load(), 20); + EXPECT_EQ(info.capacity.load(), 20); +} + +TEST(HashtablezInfoTest, RecordInsert) { + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + const int64_t test_stride = 25; + const size_t test_element_size = 23; + info.PrepareForSampling(test_stride, test_element_size); + EXPECT_EQ(info.max_probe_length.load(), 0); + RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); + EXPECT_EQ(info.max_probe_length.load(), 6); + EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00); + RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength); + EXPECT_EQ(info.max_probe_length.load(), 6); + EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00); + RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength); + EXPECT_EQ(info.max_probe_length.load(), 12); + EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00); +} + +TEST(HashtablezInfoTest, RecordErase) { + const int64_t test_stride = 31; + const size_t test_element_size = 29; + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + info.PrepareForSampling(test_stride, test_element_size); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.size.load(), 0); + RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); + EXPECT_EQ(info.size.load(), 1); + RecordEraseSlow(&info); + EXPECT_EQ(info.size.load(), 0); + EXPECT_EQ(info.num_erases.load(), 1); + EXPECT_EQ(info.inline_element_size, test_element_size); +} + +TEST(HashtablezInfoTest, RecordRehash) { + const int64_t test_stride = 33; + const size_t test_element_size = 31; + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + info.PrepareForSampling(test_stride, test_element_size); + RecordInsertSlow(&info, 0x1, 0); + RecordInsertSlow(&info, 0x2, kProbeLength); + RecordInsertSlow(&info, 0x4, kProbeLength); + RecordInsertSlow(&info, 0x8, 2 * kProbeLength); + EXPECT_EQ(info.size.load(), 4); + EXPECT_EQ(info.total_probe_length.load(), 4); + + RecordEraseSlow(&info); + RecordEraseSlow(&info); + EXPECT_EQ(info.size.load(), 2); + EXPECT_EQ(info.total_probe_length.load(), 4); + EXPECT_EQ(info.num_erases.load(), 2); + + RecordRehashSlow(&info, 3 * kProbeLength); + EXPECT_EQ(info.size.load(), 2); + EXPECT_EQ(info.total_probe_length.load(), 3); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_rehashes.load(), 1); + EXPECT_EQ(info.inline_element_size, test_element_size); +} + +TEST(HashtablezInfoTest, RecordReservation) { + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + const int64_t test_stride = 35; + const size_t test_element_size = 33; + info.PrepareForSampling(test_stride, 
test_element_size); + RecordReservationSlow(&info, 3); + EXPECT_EQ(info.max_reserve.load(), 3); + + RecordReservationSlow(&info, 2); + // High watermark does not change + EXPECT_EQ(info.max_reserve.load(), 3); + + RecordReservationSlow(&info, 10); + // High watermark does change + EXPECT_EQ(info.max_reserve.load(), 10); +} + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +TEST(HashtablezSamplerTest, SmallSampleParameter) { + const size_t test_element_size = 31; + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(100); + + for (int i = 0; i < 1000; ++i) { + SamplingState next_sample = {0, 0}; + HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); + EXPECT_GT(next_sample.next_sample, 0); + EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); + EXPECT_NE(sample, nullptr); + UnsampleSlow(sample); + } +} + +TEST(HashtablezSamplerTest, LargeSampleParameter) { + const size_t test_element_size = 31; + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(std::numeric_limits::max()); + + for (int i = 0; i < 1000; ++i) { + SamplingState next_sample = {0, 0}; + HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); + EXPECT_GT(next_sample.next_sample, 0); + EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); + EXPECT_NE(sample, nullptr); + UnsampleSlow(sample); + } +} + +TEST(HashtablezSamplerTest, Sample) { + const size_t test_element_size = 31; + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(100); + int64_t num_sampled = 0; + int64_t total = 0; + double sample_rate = 0.0; + for (int i = 0; i < 1000000; ++i) { + HashtablezInfoHandle h = Sample(test_element_size); + ++total; + if (h.IsSampled()) { + ++num_sampled; + } + sample_rate = static_cast(num_sampled) / total; + if (0.005 < sample_rate && sample_rate < 0.015) break; + } + EXPECT_NEAR(sample_rate, 0.01, 0.005); +} + +TEST(HashtablezSamplerTest, Handle) { + auto& sampler = GlobalHashtablezSampler(); + const int64_t test_stride = 41; + const size_t test_element_size = 39; + HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size)); + auto* info = HashtablezInfoHandlePeer::GetInfo(&h); + info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed); + + bool found = false; + sampler.Iterate([&](const HashtablezInfo& h) { + if (&h == info) { + EXPECT_EQ(h.weight, test_stride); + EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678); + found = true; + } + }); + EXPECT_TRUE(found); + + h.Unregister(); + h = HashtablezInfoHandle(); + found = false; + sampler.Iterate([&](const HashtablezInfo& h) { + if (&h == info) { + // this will only happen if some other thread has resurrected the info + // the old handle was using. 
+ if (h.hashes_bitwise_and.load() == 0x12345678) { + found = true; + } + } + }); + EXPECT_FALSE(found); +} +#endif + + +TEST(HashtablezSamplerTest, Registration) { + HashtablezSampler sampler; + auto* info1 = Register(&sampler, 1); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1)); + + auto* info2 = Register(&sampler, 2); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2)); + info1->size.store(3); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2)); + + sampler.Unregister(info1); + sampler.Unregister(info2); +} + +TEST(HashtablezSamplerTest, Unregistration) { + HashtablezSampler sampler; + std::vector infos; + for (size_t i = 0; i < 3; ++i) { + infos.push_back(Register(&sampler, i)); + } + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2)); + + sampler.Unregister(infos[1]); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2)); + + infos.push_back(Register(&sampler, 3)); + infos.push_back(Register(&sampler, 4)); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4)); + sampler.Unregister(infos[3]); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4)); + + sampler.Unregister(infos[0]); + sampler.Unregister(infos[2]); + sampler.Unregister(infos[4]); + EXPECT_THAT(GetSizes(&sampler), IsEmpty()); +} + +TEST(HashtablezSamplerTest, MultiThreaded) { + HashtablezSampler sampler; + Notification stop; + ThreadPool pool(10); + + for (int i = 0; i < 10; ++i) { + const int64_t sampling_stride = 11 + i % 3; + const size_t elt_size = 10 + i % 2; + pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() { + std::random_device rd; + std::mt19937 gen(rd()); + + std::vector infoz; + while (!stop.HasBeenNotified()) { + if (infoz.empty()) { + infoz.push_back(sampler.Register(sampling_stride, elt_size)); + } + switch (std::uniform_int_distribution<>(0, 2)(gen)) { + case 0: { + infoz.push_back(sampler.Register(sampling_stride, elt_size)); + break; + } + case 1: { + size_t p = + std::uniform_int_distribution<>(0, infoz.size() - 1)(gen); + HashtablezInfo* info = infoz[p]; + infoz[p] = infoz.back(); + infoz.pop_back(); + EXPECT_EQ(info->weight, sampling_stride); + sampler.Unregister(info); + break; + } + case 2: { + absl::Duration oldest = absl::ZeroDuration(); + sampler.Iterate([&](const HashtablezInfo& info) { + oldest = std::max(oldest, absl::Now() - info.create_time); + }); + ASSERT_GE(oldest, absl::ZeroDuration()); + break; + } + } + } + }); + } + // The threads will hammer away. Give it a little bit of time for tsan to + // spot errors. + absl::SleepFor(absl::Seconds(3)); + stop.Notify(); +} + +TEST(HashtablezSamplerTest, Callback) { + HashtablezSampler sampler; + + auto* info1 = Register(&sampler, 1); + auto* info2 = Register(&sampler, 2); + + static const HashtablezInfo* expected; + + auto callback = [](const HashtablezInfo& info) { + // We can't use `info` outside of this callback because the object will be + // disposed as soon as we return from here. + EXPECT_EQ(&info, expected); + }; + + // Set the callback. + EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr); + expected = info1; + sampler.Unregister(info1); + + // Unset the callback. + EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr)); + expected = nullptr; // no more calls. 
+  sampler.Unregister(info2);
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/container/internal/inlined_vector.h b/third_party/absl/absl/container/internal/inlined_vector.h
new file mode 100644
index 000000000..0eb9c34dd
--- /dev/null
+++ b/third_party/absl/absl/container/internal/inlined_vector.h
@@ -0,0 +1,1101 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inlined_vector_internal {
+
+// GCC does not deal very well with the below code
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#endif
+
+template <typename A>
+using AllocatorTraits = std::allocator_traits<A>;
+template <typename A>
+using ValueType = typename AllocatorTraits<A>::value_type;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using Pointer = typename AllocatorTraits<A>::pointer;
+template <typename A>
+using ConstPointer = typename AllocatorTraits<A>::const_pointer;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using DifferenceType = typename AllocatorTraits<A>::difference_type;
+template <typename A>
+using Reference = ValueType<A>&;
+template <typename A>
+using ConstReference = const ValueType<A>&;
+template <typename A>
+using Iterator = Pointer<A>;
+template <typename A>
+using ConstIterator = ConstPointer<A>;
+template <typename A>
+using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
+template <typename A>
+using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
+template <typename A>
+using MoveIterator = typename std::move_iterator<Iterator<A>>;
+
+template <typename Iterator>
+using IsAtLeastForwardIterator = std::is_convertible<
+    typename std::iterator_traits<Iterator>::iterator_category,
+    std::forward_iterator_tag>;
+
+template <typename A>
+using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
+template <typename A>
+using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
+
+template <typename T>
+struct TypeIdentity {
+  using type = T;
+};
+
+// Used for function arguments in template functions to prevent ADL by forcing
+// callers to explicitly specify the template parameter.
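
The alias declared next implements a small but important trick: spelling a
parameter type as `TypeIdentity<T>::type` puts `T` in a non-deduced context,
so neither ADL nor ordinary deduction can pick it, and callers must name it
explicitly. A standalone sketch with hypothetical names:

    template <typename T>
    struct TypeIdentity {
      using type = T;
    };

    // `T` appears only behind a `::`, a non-deduced context, so it can never
    // be inferred from the argument.
    template <typename T>
    void NotDeduced(typename TypeIdentity<T>::type /*value*/) {}

    // By contrast, a plain `T` parameter is deducible.
    template <typename T>
    void Deduced(T /*value*/) {}

    int main() {
      Deduced(1);          // OK: T deduced as int
      NotDeduced<int>(1);  // OK: T named explicitly
      // NotDeduced(1);    // error: no way to deduce T
    }
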
+template +using NoTypeDeduction = typename TypeIdentity::type; + +template >::value> +struct DestroyAdapter; + +template +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + for (SizeType i = destroy_size; i != 0;) { + --i; + AllocatorTraits::destroy(allocator, destroy_first + i); + } + } +}; + +template +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + static_cast(allocator); + static_cast(destroy_first); + static_cast(destroy_size); + } +}; + +template +struct Allocation { + Pointer data = nullptr; + SizeType capacity = 0; +}; + +template ) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> +struct MallocAdapter { + static Allocation Allocate(A& allocator, SizeType requested_capacity) { + return {AllocatorTraits::allocate(allocator, requested_capacity), + requested_capacity}; + } + + static void Deallocate(A& allocator, Pointer pointer, + SizeType capacity) { + AllocatorTraits::deallocate(allocator, pointer, capacity); + } +}; + +template +void ConstructElements(NoTypeDeduction& allocator, + Pointer construct_first, ValueAdapter& values, + SizeType construct_size) { + for (SizeType i = 0; i < construct_size; ++i) { + ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); } + ABSL_INTERNAL_CATCH_ANY { + DestroyAdapter::DestroyElements(allocator, construct_first, i); + ABSL_INTERNAL_RETHROW; + } + } +} + +template +void AssignElements(Pointer assign_first, ValueAdapter& values, + SizeType assign_size) { + for (SizeType i = 0; i < assign_size; ++i) { + values.AssignNext(assign_first + i); + } +} + +template +struct StorageView { + Pointer data; + SizeType size; + SizeType capacity; +}; + +template +class IteratorValueAdapter { + public: + explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} + + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at, *it_); + ++it_; + } + + void AssignNext(Pointer assign_at) { + *assign_at = *it_; + ++it_; + } + + private: + Iterator it_; +}; + +template +class CopyValueAdapter { + public: + explicit CopyValueAdapter(ConstPointer p) : ptr_(p) {} + + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at, *ptr_); + } + + void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } + + private: + ConstPointer ptr_; +}; + +template +class DefaultValueAdapter { + public: + explicit DefaultValueAdapter() {} + + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at); + } + + void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } +}; + +template +class AllocationTransaction { + public: + explicit AllocationTransaction(A& allocator) + : allocator_data_(allocator, nullptr), capacity_(0) {} + + ~AllocationTransaction() { + if (DidAllocate()) { + MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); + } + } + + AllocationTransaction(const AllocationTransaction&) = delete; + void operator=(const AllocationTransaction&) = delete; + + A& GetAllocator() { return allocator_data_.template get<0>(); } + Pointer& GetData() { return allocator_data_.template get<1>(); } + SizeType& GetCapacity() { return capacity_; } + + bool DidAllocate() { return GetData() != nullptr; } + + Pointer Allocate(SizeType requested_capacity) { + Allocation result = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + GetData() = result.data; + 
GetCapacity() = result.capacity; + return result.data; + } + + ABSL_MUST_USE_RESULT Allocation Release() && { + Allocation result = {GetData(), GetCapacity()}; + Reset(); + return result; + } + + private: + void Reset() { + GetData() = nullptr; + GetCapacity() = 0; + } + + container_internal::CompressedTuple> allocator_data_; + SizeType capacity_; +}; + +template +class ConstructionTransaction { + public: + explicit ConstructionTransaction(A& allocator) + : allocator_data_(allocator, nullptr), size_(0) {} + + ~ConstructionTransaction() { + if (DidConstruct()) { + DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); + } + } + + ConstructionTransaction(const ConstructionTransaction&) = delete; + void operator=(const ConstructionTransaction&) = delete; + + A& GetAllocator() { return allocator_data_.template get<0>(); } + Pointer& GetData() { return allocator_data_.template get<1>(); } + SizeType& GetSize() { return size_; } + + bool DidConstruct() { return GetData() != nullptr; } + template + void Construct(Pointer data, ValueAdapter& values, SizeType size) { + ConstructElements(GetAllocator(), data, values, size); + GetData() = data; + GetSize() = size; + } + void Commit() && { + GetData() = nullptr; + GetSize() = 0; + } + + private: + container_internal::CompressedTuple> allocator_data_; + SizeType size_; +}; + +template +class Storage { + public: + struct MemcpyPolicy {}; + struct ElementwiseAssignPolicy {}; + struct ElementwiseSwapPolicy {}; + struct ElementwiseConstructPolicy {}; + + using MoveAssignmentPolicy = absl::conditional_t< + // Fast path: if the value type can be trivially move assigned and + // destroyed, and we know the allocator doesn't do anything fancy, then + // it's safe for us to simply adopt the contents of the storage for + // `other` and remove its own reference to them. It's as if we had + // individually move-assigned each value and then destroyed the original. + absl::conjunction>, + absl::is_trivially_destructible>, + std::is_same>>>::value, + MemcpyPolicy, + // Otherwise we use move assignment if possible. If not, we simulate + // move assignment using move construction. + // + // Note that this is in contrast to e.g. std::vector and std::optional, + // which are themselves not move-assignable when their contained type is + // not. + absl::conditional_t::value, ElementwiseAssignPolicy, + ElementwiseConstructPolicy>>; + + // The policy to be used specifically when swapping inlined elements. + using SwapInlinedElementsPolicy = absl::conditional_t< + // Fast path: if the value type can be trivially move constructed/assigned + // and destroyed, and we know the allocator doesn't do anything fancy, + // then it's safe for us to simply swap the bytes in the inline storage. + // It's as if we had move-constructed a temporary vector, move-assigned + // one to the other, then move-assigned the first from the temporary. 
+ absl::conjunction>, + absl::is_trivially_move_assignable>, + absl::is_trivially_destructible>, + std::is_same>>>::value, + MemcpyPolicy, + absl::conditional_t::value, ElementwiseSwapPolicy, + ElementwiseConstructPolicy>>; + + static SizeType NextCapacity(SizeType current_capacity) { + return current_capacity * 2; + } + + static SizeType ComputeCapacity(SizeType current_capacity, + SizeType requested_capacity) { + return (std::max)(NextCapacity(current_capacity), requested_capacity); + } + + // --------------------------------------------------------------------------- + // Storage Constructors and Destructor + // --------------------------------------------------------------------------- + + Storage() : metadata_(A(), /* size and is_allocated */ 0u) {} + + explicit Storage(const A& allocator) + : metadata_(allocator, /* size and is_allocated */ 0u) {} + + ~Storage() { + // Fast path: if we are empty and not allocated, there's nothing to do. + if (GetSizeAndIsAllocated() == 0) { + return; + } + + // Fast path: if no destructors need to be run and we know the allocator + // doesn't do anything fancy, then all we need to do is deallocate (and + // maybe not even that). + if (absl::is_trivially_destructible>::value && + std::is_same>>::value) { + DeallocateIfAllocated(); + return; + } + + DestroyContents(); + } + + // --------------------------------------------------------------------------- + // Storage Member Accessors + // --------------------------------------------------------------------------- + + SizeType& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } + + const SizeType& GetSizeAndIsAllocated() const { + return metadata_.template get<1>(); + } + + SizeType GetSize() const { return GetSizeAndIsAllocated() >> 1; } + + bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } + + Pointer GetAllocatedData() { + // GCC 12 has a false-positive -Wmaybe-uninitialized warning here. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return data_.allocated.allocated_data; +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif + } + + ConstPointer GetAllocatedData() const { + return data_.allocated.allocated_data; + } + + // ABSL_ATTRIBUTE_NO_SANITIZE_CFI is used because the memory pointed to may be + // uninitialized, a common pattern in allocate()+construct() APIs. + // https://clang.llvm.org/docs/ControlFlowIntegrity.html#bad-cast-checking + // NOTE: When this was written, LLVM documentation did not explicitly + // mention that casting `char*` and using `reinterpret_cast` qualifies + // as a bad cast. + ABSL_ATTRIBUTE_NO_SANITIZE_CFI Pointer GetInlinedData() { + return reinterpret_cast>(data_.inlined.inlined_data); + } + + ABSL_ATTRIBUTE_NO_SANITIZE_CFI ConstPointer GetInlinedData() const { + return reinterpret_cast>(data_.inlined.inlined_data); + } + + SizeType GetAllocatedCapacity() const { + return data_.allocated.allocated_capacity; + } + + SizeType GetInlinedCapacity() const { + return static_cast>(kOptimalInlinedSize); + } + + StorageView MakeStorageView() { + return GetIsAllocated() ? 
StorageView{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()} + : StorageView{GetInlinedData(), GetSize(), + GetInlinedCapacity()}; + } + + A& GetAllocator() { return metadata_.template get<0>(); } + + const A& GetAllocator() const { return metadata_.template get<0>(); } + + // --------------------------------------------------------------------------- + // Storage Member Mutators + // --------------------------------------------------------------------------- + + ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); + + template + void Initialize(ValueAdapter values, SizeType new_size); + + template + void Assign(ValueAdapter values, SizeType new_size); + + template + void Resize(ValueAdapter values, SizeType new_size); + + template + Iterator Insert(ConstIterator pos, ValueAdapter values, + SizeType insert_count); + + template + Reference EmplaceBack(Args&&... args); + + Iterator Erase(ConstIterator from, ConstIterator to); + + void Reserve(SizeType requested_capacity); + + void ShrinkToFit(); + + void Swap(Storage* other_storage_ptr); + + void SetIsAllocated() { + GetSizeAndIsAllocated() |= static_cast>(1); + } + + void UnsetIsAllocated() { + GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); + } + + void SetSize(SizeType size) { + GetSizeAndIsAllocated() = + (size << 1) | static_cast>(GetIsAllocated()); + } + + void SetAllocatedSize(SizeType size) { + GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); + } + + void SetInlinedSize(SizeType size) { + GetSizeAndIsAllocated() = size << static_cast>(1); + } + + void AddSize(SizeType count) { + GetSizeAndIsAllocated() += count << static_cast>(1); + } + + void SubtractSize(SizeType count) { + ABSL_HARDENING_ASSERT(count <= GetSize()); + + GetSizeAndIsAllocated() -= count << static_cast>(1); + } + + void SetAllocation(Allocation allocation) { + data_.allocated.allocated_data = allocation.data; + data_.allocated.allocated_capacity = allocation.capacity; + } + + void MemcpyFrom(const Storage& other_storage) { + // Assumption check: it doesn't make sense to memcpy inlined elements unless + // we know the allocator doesn't do anything fancy, and one of the following + // holds: + // + // * The elements are trivially relocatable. + // + // * It's possible to trivially assign the elements and then destroy the + // source. + // + // * It's possible to trivially copy construct/assign the elements. + // + { + using V = ValueType; + ABSL_HARDENING_ASSERT( + other_storage.GetIsAllocated() || + (std::is_same>::value && + ( + // First case above + absl::is_trivially_relocatable::value || + // Second case above + (absl::is_trivially_move_assignable::value && + absl::is_trivially_destructible::value) || + // Third case above + (absl::is_trivially_copy_constructible::value || + absl::is_trivially_copy_assignable::value)))); + } + + GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); + data_ = other_storage.data_; + } + + void DeallocateIfAllocated() { + if (GetIsAllocated()) { + MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), + GetAllocatedCapacity()); + } + } + + private: + ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); + + using Metadata = container_internal::CompressedTuple>; + + struct Allocated { + Pointer allocated_data; + SizeType allocated_capacity; + }; + + // `kOptimalInlinedSize` is an automatically adjusted inlined capacity of the + // `InlinedVector`. Sometimes, it is possible to increase the capacity (from + // the user requested `N`) without increasing the size of the `InlinedVector`. 
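
Concretely: the inline buffer can never usefully be smaller than the
out-of-line representation (a pointer plus a capacity), so for small element
types the requested `N` is rounded up essentially for free. An illustrative
computation mirroring the declaration just below (my sketch, assuming an LP64
target; not code from the patch):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Stand-in for Storage<>::Allocated: the heap representation that the
    // inline buffer must be able to cover.
    struct Allocated {
      void* allocated_data;
      size_t allocated_capacity;
    };

    int main() {
      // Hypothetical absl::InlinedVector<char, 2>: the user asked for 2
      // inline elements, but sizeof(Allocated) is 16 on LP64, so 16 chars
      // fit inline at no extra cost.
      constexpr size_t N = 2;
      constexpr size_t kOptimalInlinedSize =
          (std::max)(N, sizeof(Allocated) / sizeof(char));
      std::printf("inline capacity: %zu\n", kOptimalInlinedSize);
    }
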
+ static constexpr size_t kOptimalInlinedSize = + (std::max)(N, sizeof(Allocated) / sizeof(ValueType)); + + struct Inlined { + alignas(ValueType) char inlined_data[sizeof( + ValueType[kOptimalInlinedSize])]; + }; + + union Data { + Allocated allocated; + Inlined inlined; + }; + + void SwapN(ElementwiseSwapPolicy, Storage* other, SizeType n); + void SwapN(ElementwiseConstructPolicy, Storage* other, SizeType n); + + void SwapInlinedElements(MemcpyPolicy, Storage* other); + template + void SwapInlinedElements(NotMemcpyPolicy, Storage* other); + + template + ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); + + Metadata metadata_; + Data data_; +}; + +template +void Storage::DestroyContents() { + Pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); + DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); + DeallocateIfAllocated(); +} + +template +void Storage::InitFrom(const Storage& other) { + const SizeType n = other.GetSize(); + ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. + ConstPointer src; + Pointer dst; + if (!other.GetIsAllocated()) { + dst = GetInlinedData(); + src = other.GetInlinedData(); + } else { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + SetAllocation(allocation); + dst = allocation.data; + src = other.GetAllocatedData(); + } + + // Fast path: if the value type is trivially copy constructible and we know + // the allocator doesn't do anything fancy, then we know it is legal for us to + // simply memcpy the other vector's elements. + if (absl::is_trivially_copy_constructible>::value && + std::is_same>>::value) { + std::memcpy(reinterpret_cast(dst), + reinterpret_cast(src), n * sizeof(ValueType)); + } else { + auto values = IteratorValueAdapter>(src); + ConstructElements(GetAllocator(), dst, values, n); + } + + GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); +} + +template +template +auto Storage::Initialize(ValueAdapter values, SizeType new_size) + -> void { + // Only callable from constructors! + ABSL_HARDENING_ASSERT(!GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetSize() == 0); + + Pointer construct_data; + if (new_size > GetInlinedCapacity()) { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = + ComputeCapacity(GetInlinedCapacity(), new_size); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + construct_data = allocation.data; + SetAllocation(allocation); + SetIsAllocated(); + } else { + construct_data = GetInlinedData(); + } + + ConstructElements(GetAllocator(), construct_data, values, new_size); + + // Since the initial size was guaranteed to be `0` and the allocated bit is + // already correct for either case, *adding* `new_size` gives us the correct + // result faster than setting it directly. 
+ AddSize(new_size); +} + +template +template +auto Storage::Assign(ValueAdapter values, SizeType new_size) + -> void { + StorageView storage_view = MakeStorageView(); + + AllocationTransaction allocation_tx(GetAllocator()); + + absl::Span> assign_loop; + absl::Span> construct_loop; + absl::Span> destroy_loop; + + if (new_size > storage_view.capacity) { + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; + destroy_loop = {storage_view.data, storage_view.size}; + } else if (new_size > storage_view.size) { + assign_loop = {storage_view.data, storage_view.size}; + construct_loop = {storage_view.data + storage_view.size, + new_size - storage_view.size}; + } else { + assign_loop = {storage_view.data, new_size}; + destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; + } + + AssignElements(assign_loop.data(), values, assign_loop.size()); + + ConstructElements(GetAllocator(), construct_loop.data(), values, + construct_loop.size()); + + DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), + destroy_loop.size()); + + if (allocation_tx.DidAllocate()) { + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + + SetSize(new_size); +} + +template +template +auto Storage::Resize(ValueAdapter values, SizeType new_size) + -> void { + StorageView storage_view = MakeStorageView(); + Pointer const base = storage_view.data; + const SizeType size = storage_view.size; + A& alloc = GetAllocator(); + if (new_size <= size) { + // Destroy extra old elements. + DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); + } else if (new_size <= storage_view.capacity) { + // Construct new elements in place. + ConstructElements(alloc, base + size, values, new_size - size); + } else { + // Steps: + // a. Allocate new backing store. + // b. Construct new elements in new backing store. + // c. Move existing elements from old backing store to new backing store. + // d. Destroy all elements in old backing store. + // Use transactional wrappers for the first two steps so we can roll + // back if necessary due to exceptions. 
+ AllocationTransaction allocation_tx(alloc); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + ConstructionTransaction construction_tx(alloc); + construction_tx.Construct(new_data + size, values, new_size - size); + + IteratorValueAdapter> move_values( + (MoveIterator(base))); + ConstructElements(alloc, new_data, move_values, size); + + DestroyAdapter::DestroyElements(alloc, base, size); + std::move(construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + SetSize(new_size); +} + +template +template +auto Storage::Insert(ConstIterator pos, ValueAdapter values, + SizeType insert_count) -> Iterator { + StorageView storage_view = MakeStorageView(); + + auto insert_index = static_cast>( + std::distance(ConstIterator(storage_view.data), pos)); + SizeType insert_end_index = insert_index + insert_count; + SizeType new_size = storage_view.size + insert_count; + + if (new_size > storage_view.capacity) { + AllocationTransaction allocation_tx(GetAllocator()); + ConstructionTransaction construction_tx(GetAllocator()); + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + construction_tx.Construct(new_data + insert_index, values, insert_count); + + move_construction_tx.Construct(new_data, move_values, insert_index); + + ConstructElements(GetAllocator(), new_data + insert_end_index, + move_values, storage_view.size - insert_index); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + std::move(construction_tx).Commit(); + std::move(move_construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + + SetAllocatedSize(new_size); + return Iterator(new_data + insert_index); + } else { + SizeType move_construction_destination_index = + (std::max)(insert_end_index, storage_view.size); + + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_construction_values( + MoveIterator(storage_view.data + + (move_construction_destination_index - insert_count))); + absl::Span> move_construction = { + storage_view.data + move_construction_destination_index, + new_size - move_construction_destination_index}; + + Pointer move_assignment_values = storage_view.data + insert_index; + absl::Span> move_assignment = { + storage_view.data + insert_end_index, + move_construction_destination_index - insert_end_index}; + + absl::Span> insert_assignment = {move_assignment_values, + move_construction.size()}; + + absl::Span> insert_construction = { + insert_assignment.data() + insert_assignment.size(), + insert_count - insert_assignment.size()}; + + move_construction_tx.Construct(move_construction.data(), + move_construction_values, + move_construction.size()); + + for (Pointer + destination = move_assignment.data() + move_assignment.size(), + last_destination = move_assignment.data(), + source = move_assignment_values + move_assignment.size(); + ;) { + --destination; + --source; + if (destination < last_destination) break; + *destination = std::move(*source); + } + + AssignElements(insert_assignment.data(), values, + insert_assignment.size()); + + ConstructElements(GetAllocator(), 
insert_construction.data(), values, + insert_construction.size()); + + std::move(move_construction_tx).Commit(); + + AddSize(insert_count); + return Iterator(storage_view.data + insert_index); + } +} + +template +template +auto Storage::EmplaceBack(Args&&... args) -> Reference { + StorageView storage_view = MakeStorageView(); + const SizeType n = storage_view.size; + if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) { + // Fast path; new element fits. + Pointer last_ptr = storage_view.data + n; + AllocatorTraits::construct(GetAllocator(), last_ptr, + std::forward(args)...); + AddSize(1); + return *last_ptr; + } + // TODO(b/173712035): Annotate with musttail attribute to prevent regression. + return EmplaceBackSlow(std::forward(args)...); +} + +template +template +auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { + StorageView storage_view = MakeStorageView(); + AllocationTransaction allocation_tx(GetAllocator()); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + SizeType requested_capacity = NextCapacity(storage_view.capacity); + Pointer construct_data = allocation_tx.Allocate(requested_capacity); + Pointer last_ptr = construct_data + storage_view.size; + + // Construct new element. + AllocatorTraits::construct(GetAllocator(), last_ptr, + std::forward(args)...); + // Move elements from old backing store to new backing store. + ABSL_INTERNAL_TRY { + ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, + storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY { + AllocatorTraits::destroy(GetAllocator(), last_ptr); + ABSL_INTERNAL_RETHROW; + } + // Destroy elements in old backing store. + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + AddSize(1); + return *last_ptr; +} + +template +auto Storage::Erase(ConstIterator from, ConstIterator to) + -> Iterator { + StorageView storage_view = MakeStorageView(); + + auto erase_size = static_cast>(std::distance(from, to)); + auto erase_index = static_cast>( + std::distance(ConstIterator(storage_view.data), from)); + SizeType erase_end_index = erase_index + erase_size; + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data + erase_end_index)); + + AssignElements(storage_view.data + erase_index, move_values, + storage_view.size - erase_end_index); + + DestroyAdapter::DestroyElements( + GetAllocator(), storage_view.data + (storage_view.size - erase_size), + erase_size); + + SubtractSize(erase_size); + return Iterator(storage_view.data + erase_index); +} + +template +auto Storage::Reserve(SizeType requested_capacity) -> void { + StorageView storage_view = MakeStorageView(); + + if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + + SizeType new_requested_capacity = + ComputeCapacity(storage_view.capacity, requested_capacity); + Pointer new_data = allocation_tx.Allocate(new_requested_capacity); + + ConstructElements(GetAllocator(), new_data, move_values, + storage_view.size); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); +} + +template +auto Storage::ShrinkToFit() -> void { + // May only be called on allocated instances! 
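+  // (Illustrative usage, not from the upstream source, assuming the public
+  // absl::InlinedVector<T, N> API that drives this routine:
+  //
+  //   absl::InlinedVector<int, 4> v(100);  // size 100 forces heap storage
+  //   v.resize(2);                         // 2 <= inlined capacity of 4
+  //   v.shrink_to_fit();                   // reaches this routine
+  //
+  // When the remaining size fits in the inlined capacity, the elements are
+  // moved back into the inlined buffer and the heap allocation is freed;
+  // otherwise a smaller heap allocation is requested.)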
+ ABSL_HARDENING_ASSERT(GetIsAllocated()); + + StorageView storage_view{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()}; + + if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + + Pointer construct_data; + if (storage_view.size > GetInlinedCapacity()) { + SizeType requested_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(requested_capacity); + if (allocation_tx.GetCapacity() >= storage_view.capacity) { + // Already using the smallest available heap allocation. + return; + } + } else { + construct_data = GetInlinedData(); + } + + ABSL_INTERNAL_TRY { + ConstructElements(GetAllocator(), construct_data, move_values, + storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY { + SetAllocation({storage_view.data, storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + MallocAdapter::Deallocate(GetAllocator(), storage_view.data, + storage_view.capacity); + + if (allocation_tx.DidAllocate()) { + SetAllocation(std::move(allocation_tx).Release()); + } else { + UnsetIsAllocated(); + } +} + +template +auto Storage::Swap(Storage* other_storage_ptr) -> void { + using std::swap; + ABSL_HARDENING_ASSERT(this != other_storage_ptr); + + if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { + swap(data_.allocated, other_storage_ptr->data_.allocated); + } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) { + SwapInlinedElements(SwapInlinedElementsPolicy{}, other_storage_ptr); + } else { + Storage* allocated_ptr = this; + Storage* inlined_ptr = other_storage_ptr; + if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr); + + StorageView allocated_storage_view{ + allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), + allocated_ptr->GetAllocatedCapacity()}; + + IteratorValueAdapter> move_values( + MoveIterator(inlined_ptr->GetInlinedData())); + + ABSL_INTERNAL_TRY { + ConstructElements(inlined_ptr->GetAllocator(), + allocated_ptr->GetInlinedData(), move_values, + inlined_ptr->GetSize()); + } + ABSL_INTERNAL_CATCH_ANY { + allocated_ptr->SetAllocation(Allocation{ + allocated_storage_view.data, allocated_storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), + inlined_ptr->GetInlinedData(), + inlined_ptr->GetSize()); + + inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, + allocated_storage_view.capacity}); + } + + swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); + swap(GetAllocator(), other_storage_ptr->GetAllocator()); +} + +template +void Storage::SwapN(ElementwiseSwapPolicy, Storage* other, + SizeType n) { + std::swap_ranges(GetInlinedData(), GetInlinedData() + n, + other->GetInlinedData()); +} + +template +void Storage::SwapN(ElementwiseConstructPolicy, Storage* other, + SizeType n) { + Pointer a = GetInlinedData(); + Pointer b = other->GetInlinedData(); + // see note on allocators in `SwapInlinedElements`. 
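+  // (Illustrative sketch, not from the upstream source.) Each iteration of
+  // the loop below relocates one pair of elements with destroy + construct
+  // rather than assignment, so that after the later allocator swap each
+  // element was constructed by the allocator that will own its slot:
+  //
+  //   tmp = move(a[i]);
+  //   destroy a[i] with allocator_a; construct a[i] from b[i] with allocator_b;
+  //   destroy b[i] with allocator_b; construct b[i] from tmp with allocator_a;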
+  A& allocator_a = GetAllocator();
+  A& allocator_b = other->GetAllocator();
+  for (SizeType<A> i = 0; i < n; ++i, ++a, ++b) {
+    ValueType<A> tmp(std::move(*a));
+
+    AllocatorTraits<A>::destroy(allocator_a, a);
+    AllocatorTraits<A>::construct(allocator_b, a, std::move(*b));
+
+    AllocatorTraits<A>::destroy(allocator_b, b);
+    AllocatorTraits<A>::construct(allocator_a, b, std::move(tmp));
+  }
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::SwapInlinedElements(MemcpyPolicy, Storage* other) {
+  Data tmp = data_;
+  data_ = other->data_;
+  other->data_ = tmp;
+}
+
+template <typename T, size_t N, typename A>
+template <typename NotMemcpyPolicy>
+void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
+                                           Storage* other) {
+  // Note: `destroy` needs to use pre-swap allocator while `construct` -
+  // post-swap allocator. Allocators will be swapped later on outside of
+  // `SwapInlinedElements`.
+  Storage* small_ptr = this;
+  Storage* large_ptr = other;
+  if (small_ptr->GetSize() > large_ptr->GetSize()) {
+    std::swap(small_ptr, large_ptr);
+  }
+
+  auto small_size = small_ptr->GetSize();
+  auto diff = large_ptr->GetSize() - small_size;
+  SwapN(policy, other, small_size);
+
+  IteratorValueAdapter<A, MoveIterator<A>> move_values(
+      MoveIterator<A>(large_ptr->GetInlinedData() + small_size));
+
+  ConstructElements<A>(large_ptr->GetAllocator(),
+                       small_ptr->GetInlinedData() + small_size, move_values,
+                       diff);
+
+  DestroyAdapter<A>::DestroyElements(large_ptr->GetAllocator(),
+                                     large_ptr->GetInlinedData() + small_size,
+                                     diff);
+}
+
+// End ignore "array-bounds"
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+}  // namespace inlined_vector_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
diff --git a/third_party/absl/absl/container/internal/layout.h b/third_party/absl/absl/container/internal/layout.h
new file mode 100644
index 000000000..a4ba61014
--- /dev/null
+++ b/third_party/absl/absl/container/internal/layout.h
@@ -0,0 +1,728 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//                           MOTIVATION AND TUTORIAL
+//
+// If you want to put in a single heap allocation N doubles followed by M ints,
+// it's easy if N and M are known at compile time.
+//
+//   struct S {
+//     double a[N];
+//     int b[M];
+//   };
+//
+//   S* p = new S;
+//
+// But what if N and M are known only in run time? Class template Layout to the
+// rescue! It's a portable generalization of the technique known as struct hack.
+//
+//   // This object will tell us everything we need to know about the memory
+//   // layout of double[N] followed by int[M]. It's structurally identical to
+//   // size_t[2] that stores N and M. It's very cheap to create.
+//   const Layout<double, int> layout(N, M);
+//
+//   // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+//   // memory is needed. We are free to use any allocation function we want as
+//   // long as it returns aligned memory.
+//   std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+//   // Obtain the pointer to the array of doubles.
+//   // Equivalent to `reinterpret_cast<double*>(p.get())`.
+//   //
+//   // We could have written layout.Pointer<0>(p) instead. If all the types are
+//   // unique you can use either form, but if some types are repeated you must
+//   // use the index form.
+//   double* a = layout.Pointer<double>(p.get());
+//
+//   // Obtain the pointer to the array of ints.
+//   // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+//   int* b = layout.Pointer<int>(p.get());
+//
+// If we are unable to specify sizes of all fields, we can pass as many sizes as
+// we can to `Partial()`. In return, it'll allow us to access the fields whose
+// locations and sizes can be computed from the provided information.
+// `Partial()` comes in handy when the array sizes are embedded into the
+// allocation.
+//
+//   // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+//   using L = Layout<size_t, size_t, double, int>;
+//
+//   unsigned char* Allocate(size_t n, size_t m) {
+//     const L layout(1, 1, n, m);
+//     unsigned char* p = new unsigned char[layout.AllocSize()];
+//     *layout.Pointer<0>(p) = n;
+//     *layout.Pointer<1>(p) = m;
+//     return p;
+//   }
+//
+//   void Use(unsigned char* p) {
+//     // First, extract N and M.
+//     // Specify that the first array has only one element. Using `prefix` we
+//     // can access the first two arrays but not more.
+//     constexpr auto prefix = L::Partial(1);
+//     size_t n = *prefix.Pointer<0>(p);
+//     size_t m = *prefix.Pointer<1>(p);
+//
+//     // Now we can get pointers to the payload.
+//     const L layout(1, 1, n, m);
+//     double* a = layout.Pointer<double>(p);
+//     int* b = layout.Pointer<int>(p);
+//   }
+//
+// The layout we used above combines fixed-size with dynamically-sized fields.
+// This is quite common. Layout is optimized for this use case and generates
+// optimal code. All computations that can be performed at compile time are
+// indeed performed at compile time.
+//
+// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
+// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
+// padding in between arrays.
+//
+// You can manually override the alignment of an array by wrapping the type in
+// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding). `N` cannot be less than `alignof(T)`.
+//
+// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
+// memory layouts. Check out the reference or code below to discover more.
+//
+//                               EXAMPLE
+//
+//   // Immutable move-only string with sizeof equal to sizeof(void*). The
+//   // string size and the characters are kept in the same heap allocation.
+//   class CompactString {
+//    public:
+//     CompactString(const char* s = "") {
+//       const size_t size = strlen(s);
+//       // size_t[1] followed by char[size + 1].
+//       const L layout(1, size + 1);
+//       p_.reset(new unsigned char[layout.AllocSize()]);
+//       // If running under ASAN, mark the padding bytes, if any, to catch
+//       // memory errors.
+//       layout.PoisonPadding(p_.get());
+//       // Store the size in the allocation.
+//       *layout.Pointer<size_t>(p_.get()) = size;
+//       // Store the characters in the allocation.
+//       memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+//     }
+//
+//     size_t size() const {
+//       // Equivalent to reinterpret_cast<size_t&>(*p).
+//       return *L::Partial().Pointer<size_t>(p_.get());
+//     }
+//
+//     const char* c_str() const {
+//       // Equivalent to reinterpret_cast<const char*>(p.get() + sizeof(size_t)).
+//       // The argument in Partial(1) specifies that we have size_t[1] in front
+//       // of the characters.
+//       return L::Partial(1).Pointer<char>(p_.get());
+//     }
+//
+//    private:
+//     // Our heap allocation contains a size_t followed by an array of chars.
+//     using L = Layout<size_t, char>;
+//     std::unique_ptr<unsigned char[]> p_;
+//   };
+//
+//   int main() {
+//     CompactString s = "hello";
+//     assert(s.size() == 5);
+//     assert(strcmp(s.c_str(), "hello") == 0);
+//   }
+//
+//                               DOCUMENTATION
+//
+// The interface exported by this file consists of:
+//   - class `Layout<>` and its public members.
+//   - The public members of class `internal_layout::LayoutImpl<>`. That class
+//     isn't intended to be used directly, and its name and template parameter
+//     list are internal implementation details, but the class itself provides
+//     most of the functionality in this file. See comments on its members for
+//     detailed documentation.
+//
+// `Layout<T1,..., Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
+// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
+// creates a `Layout` object, which exposes the same functionality by inheriting
+// from `LayoutImpl<>`.
+
+#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ostream>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/debugging/internal/demangle.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/types/span.h"
+#include "absl/utility/utility.h"
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A type wrapper that instructs `Layout` to use the specific alignment for the
+// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding).
+//
+// Requires: `N >= alignof(T)` and `N` is a power of 2.
+template <class T, size_t N>
+struct Aligned;
+
+namespace internal_layout {
+
+template <class T>
+struct NotAligned {};
+
+template <class T, size_t N>
+struct NotAligned<const Aligned<T, N>> {
+  static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
+};
+
+template <size_t>
+using IntToSize = size_t;
+
+template <class>
+using TypeToSize = size_t;
+
+template <class T>
+struct Type : NotAligned<T> {
+  using type = T;
+};
+
+template <class T, size_t N>
+struct Type<Aligned<T, N>> {
+  using type = T;
+};
+
+template <class T>
+struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
+
+template <class T, size_t N>
+struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
+
+// Note: workaround for https://gcc.gnu.org/PR88115
+template <class T>
+struct AlignOf : NotAligned<T> {
+  static constexpr size_t value = alignof(T);
+};
+
+template <class T, size_t N>
+struct AlignOf<Aligned<T, N>> {
+  static_assert(N % alignof(T) == 0,
+                "Custom alignment can't be lower than the type's alignment");
+  static constexpr size_t value = N;
+};
+
+// Does `Ts...` contain `T`?
+template <class T, class... Ts>
+using Contains = absl::disjunction<std::is_same<T, Ts>...>;
+
+template <class From, class To>
+using CopyConst =
+    typename std::conditional<std::is_const<From>::value, const To, To>::type;
+
+// Note: We're not qualifying this with absl:: because it doesn't compile under
+// MSVC.
+template <class T>
+using SliceType = Span<T>;
+
+// This namespace contains no types. It prevents functions defined in it from
+// being found by ADL.
+namespace adl_barrier {
+
+template <class Needle, class... Ts>
+constexpr size_t Find(Needle, Needle, Ts...) {
+  static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
+  return 0;
+}
+
+template <class Needle, class T, class... Ts>
+constexpr size_t Find(Needle, T, Ts...) {
+  return adl_barrier::Find(Needle(), Ts()...)
+ 1; +} + +constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } + +// Returns `q * m` for the smallest `q` such that `q * m >= n`. +// Requires: `m` is a power of two. It's enforced by IsLegalElementType below. +constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); } + +constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; } + +constexpr size_t Max(size_t a) { return a; } + +template +constexpr size_t Max(size_t a, size_t b, Ts... rest) { + return adl_barrier::Max(b < a ? a : b, rest...); +} + +template +std::string TypeName() { + std::string out; +#if ABSL_INTERNAL_HAS_RTTI + absl::StrAppend(&out, "<", + absl::debugging_internal::DemangleString(typeid(T).name()), + ">"); +#endif + return out; +} + +} // namespace adl_barrier + +template +using EnableIf = typename std::enable_if::type; + +// Can `T` be a template argument of `Layout`? +template +using IsLegalElementType = std::integral_constant< + bool, !std::is_reference::value && !std::is_volatile::value && + !std::is_reference::type>::value && + !std::is_volatile::type>::value && + adl_barrier::IsPow2(AlignOf::value)>; + +template +class LayoutImpl; + +// Public base class of `Layout` and the result type of `Layout::Partial()`. +// +// `Elements...` contains all template arguments of `Layout` that created this +// instance. +// +// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments +// passed to `Layout::Partial()` or `Layout::Layout()`. +// +// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is +// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we +// can compute offsets). +template +class LayoutImpl, absl::index_sequence, + absl::index_sequence> { + private: + static_assert(sizeof...(Elements) > 0, "At least one field is required"); + static_assert(absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + enum { + NumTypes = sizeof...(Elements), + NumSizes = sizeof...(SizeSeq), + NumOffsets = sizeof...(OffsetSeq), + }; + + // These are guaranteed by `Layout`. + static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), + "Internal error"); + static_assert(NumTypes > 0, "Internal error"); + + // Returns the index of `T` in `Elements...`. Results in a compilation error + // if `Elements...` doesn't contain exactly one instance of `T`. + template + static constexpr size_t ElementIndex() { + static_assert(Contains, Type::type>...>(), + "Type not found"); + return adl_barrier::Find(Type(), + Type::type>()...); + } + + template + using ElementAlignment = + AlignOf>::type>; + + public: + // Element types of all arrays packed in a tuple. + using ElementTypes = std::tuple::type...>; + + // Element type of the Nth array. + template + using ElementType = typename std::tuple_element::type; + + constexpr explicit LayoutImpl(IntToSize... sizes) + : size_{sizes...} {} + + // Alignment of the layout, equal to the strictest alignment of all elements. + // All pointers passed to the methods of layout must be aligned to this value. + static constexpr size_t Alignment() { + return adl_barrier::Max(AlignOf::value...); + } + + // Offset in bytes of the Nth array. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset<0>() == 0); // The ints starts from 0. + // assert(x.Offset<1>() == 16); // The doubles starts from 16. + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. 
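+  //
+  // Worked example (illustrative, not from the upstream source), for
+  // Layout<int32_t, double> x(3, 4):
+  //
+  //   Offset<0>() == 0;                  // the int32_t[3] starts at byte 0
+  //   // end of int32_t[3] is 0 + 4 * 3 == 12; align up to alignof(double):
+  //   Offset<1>() == Align(12, 8) == 16; // i.e. 4 bytes of padding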
+ template = 0> + constexpr size_t Offset() const { + return 0; + } + + template = 0> + constexpr size_t Offset() const { + static_assert(N < NumOffsets, "Index out of bounds"); + return adl_barrier::Align( + Offset() + SizeOf>::value * size_[N - 1], + ElementAlignment::value); + } + + // Offset in bytes of the array with the specified element type. There must + // be exactly one such array and its zero-based index must be at most + // `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset() == 0); // The ints starts from 0. + // assert(x.Offset() == 16); // The doubles starts from 16. + template + constexpr size_t Offset() const { + return Offset()>(); + } + + // Offsets in bytes of all arrays for which the offsets are known. + constexpr std::array Offsets() const { + return {{Offset()...}}; + } + + // The number of elements in the Nth array. This is the Nth argument of + // `Layout::Partial()` or `Layout::Layout()` (zero-based). + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size<0>() == 3); + // assert(x.Size<1>() == 4); + // + // Requires: `N < NumSizes`. + template + constexpr size_t Size() const { + static_assert(N < NumSizes, "Index out of bounds"); + return size_[N]; + } + + // The number of elements in the array with the specified element type. + // There must be exactly one such array and its zero-based index must be + // at most `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size() == 3); + // assert(x.Size() == 4); + template + constexpr size_t Size() const { + return Size()>(); + } + + // The number of elements of all arrays for which they are known. + constexpr std::array Sizes() const { + return {{Size()...}}; + } + + // Pointer to the beginning of the Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer<0>(p); + // double* doubles = x.Pointer<1>(p); + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst>* Pointer(Char* p) const { + using C = typename std::remove_const::type; + static_assert( + std::is_same() || std::is_same() || + std::is_same(), + "The argument must be a pointer to [const] [signed|unsigned] char"); + constexpr size_t alignment = Alignment(); + (void)alignment; + assert(reinterpret_cast(p) % alignment == 0); + return reinterpret_cast>*>(p + Offset()); + } + + // Pointer to the beginning of the array with the specified element type. + // There must be exactly one such array and its zero-based index must be at + // most `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer(p); + // double* doubles = x.Pointer(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst* Pointer(Char* p) const { + return Pointer()>(p); + } + + // Pointers to all arrays for which pointers are known. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. 
+ // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // int* ints; + // double* doubles; + // std::tie(ints, doubles) = x.Pointers(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>*...> + Pointers(Char* p) const { + return std::tuple>*...>( + Pointer(p)...); + } + + // The Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice<0>(p); + // Span doubles = x.Slice<1>(p); + // + // Requires: `N < NumSizes`. + // Requires: `p` is aligned to `Alignment()`. + template + SliceType>> Slice(Char* p) const { + return SliceType>>(Pointer(p), Size()); + } + + // The array with the specified element type. There must be exactly one + // such array and its zero-based index must be less than `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice(p); + // Span doubles = x.Slice(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + SliceType> Slice(Char* p) const { + return Slice()>(p); + } + + // All arrays with known sizes. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // Span ints; + // Span doubles; + // std::tie(ints, doubles) = x.Slices(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>>...> + Slices(Char* p) const { + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed + // in 6.1). + (void)p; + return std::tuple>>...>( + Slice(p)...); + } + + // The size of the allocation that fits all arrays. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes + // + // Requires: `NumSizes == sizeof...(Ts)`. + constexpr size_t AllocSize() const { + static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); + return Offset() + + SizeOf>::value * size_[NumTypes - 1]; + } + + // If built with --config=asan, poisons padding bytes (if any) in the + // allocation. The pointer must point to a memory block at least + // `AllocSize()` bytes in length. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // Requires: `p` is aligned to `Alignment()`. + template = 0> + void PoisonPadding(const Char* p) const { + Pointer<0>(p); // verify the requirements on `Char` and `p` + } + + template = 0> + void PoisonPadding(const Char* p) const { + static_assert(N < NumOffsets, "Index out of bounds"); + (void)p; +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + PoisonPadding(p); + // The `if` is an optimization. It doesn't affect the observable behaviour. + if (ElementAlignment::value % ElementAlignment::value) { + size_t start = + Offset() + SizeOf>::value * size_[N - 1]; + ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); + } +#endif + } + + // Human-readable description of the memory layout. Useful for debugging. + // Slow. 
+  //
+  //   // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
+  //   // by an unknown number of doubles.
+  //   auto x = Layout<char, int, double>::Partial(5, 3);
+  //   assert(x.DebugString() ==
+  //          "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
+  //
+  // Each field is in the following format: @offset<type>(sizeof)[size] (<type>
+  // may be missing depending on the target platform). For example,
+  // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
+  // int is 4 bytes, and we have 3 of those ints. The size of the last field may
+  // be missing (as in the example above). Only fields with known offsets are
+  // described. Type names may differ across platforms: one compiler might
+  // produce "unsigned*" where another produces "unsigned int *".
+  std::string DebugString() const {
+    const auto offsets = Offsets();
+    const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
+    const std::string types[] = {
+        adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+    std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
+    for (size_t i = 0; i != NumOffsets - 1; ++i) {
+      absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
+                      "(", sizes[i + 1], ")");
+    }
+    // NumSizes is a constant that may be zero. Some compilers cannot see that
+    // inside the if statement "size_[NumSizes - 1]" must be valid.
+    int last = static_cast<int>(NumSizes) - 1;
+    if (NumTypes == NumSizes && last >= 0) {
+      absl::StrAppend(&res, "[", size_[last], "]");
+    }
+    return res;
+  }
+
+ private:
+  // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+  size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+    std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
+    absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+}  // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+  static_assert(sizeof...(Ts) > 0, "At least one field is required");
+  static_assert(
+      absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+      "Invalid element type (see IsLegalElementType)");
+
+  // The result type of `Partial()` with `NumSizes` arguments.
+  template <size_t NumSizes>
+  using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+  // `Layout` knows the element types of the arrays we want to lay out in
+  // memory but not the number of elements in each array.
+  // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+  // resulting immutable object can be used to obtain pointers to the
+  // individual arrays.
+  //
+  // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+  // if all you need is the offset of the second array, you only need to
+  // pass one argument -- the number of elements in the first array.
+  //
+  //   // int[3] followed by 4 bytes of padding and an unknown number of
+  //   // doubles.
+  //   auto x = Layout<int, double>::Partial(3);
+  //   // doubles start at byte 16.
+  //   assert(x.Offset<1>() == 16);
+  //
+  // If you know the number of elements in all arrays, you can still call
+  // `Partial()` but it's more convenient to use the constructor of `Layout`.
+  //
+  //   Layout<int, double> x(3, 5);
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
+  //
+  // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+  // Requires: all arguments are convertible to `size_t`.
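+  //
+  // A short usage sketch (illustrative, not from the upstream source):
+  //
+  //   // Header stores n; payload is double[n].
+  //   using L = Layout<size_t, double>;
+  //   size_t n = *L::Partial(1).Pointer<0>(p);  // read n from the header
+  //   absl::Span<double> payload = L(1, n).Slice<double>(p);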
+  template <class... Sizes>
+  static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+    static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+    return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+  }
+
+  // Creates a layout with the sizes of all arrays specified. If you know
+  // only the sizes of the first N arrays (where N can be zero), you can use
+  // `Partial()` defined above. The constructor is essentially equivalent to
+  // calling `Partial()` and passing in all array sizes; the constructor is
+  // provided as a convenient abbreviation.
+  //
+  // Note: The sizes of the arrays must be specified in number of elements,
+  // not in bytes.
+  constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+      : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/third_party/absl/absl/container/internal/layout_benchmark.cc b/third_party/absl/absl/container/internal/layout_benchmark.cc
new file mode 100644
index 000000000..3af35e33e
--- /dev/null
+++ b/third_party/absl/absl/container/internal/layout_benchmark.cc
@@ -0,0 +1,122 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Every benchmark should have the same performance as the corresponding
+// headroom benchmark.
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/layout.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::benchmark::DoNotOptimize;
+
+using Int128 = int64_t[2];
+
+// This benchmark provides the upper bound on performance for BM_OffsetConstant.
+template <size_t Offset, class... Ts>
+void BM_OffsetConstantHeadroom(benchmark::State& state) {
+  for (auto _ : state) {
+    DoNotOptimize(Offset);
+  }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetConstant(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset,
+                 "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(L::Partial(3, 5, 7).template Offset<3>());
+  }
+}
+
+template <class... Ts>
+size_t VariableOffset(size_t n, size_t m, size_t k);
+
+template <>
+size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
+                                                        size_t k) {
+  auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); };
+  return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8);
+}
+
+template <>
+size_t VariableOffset<Int128, int32_t, int16_t, int8_t>(size_t n, size_t m,
+                                                        size_t k) {
+  // No alignment is necessary.
+  return n * 16 + m * 4 + k * 2;
+}
+
+// This benchmark provides the upper bound on performance for BM_OffsetVariable.
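+// (Illustrative note, not from the upstream source: the headroom variant
+// computes the offset by hand, e.g. Align(Align(Align(n * 1, 2) + m * 2, 4)
+// + k * 4, 8) for int8_t/int16_t/int32_t/Int128. BM_OffsetVariable below is
+// expected to compile down to the same instructions, so any gap between the
+// two indicates that Layout failed to fully optimize.)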
+template <size_t Offset, class... Ts>
+void BM_OffsetVariableHeadroom(benchmark::State& state) {
+  size_t n = 3;
+  size_t m = 5;
+  size_t k = 7;
+  ABSL_RAW_CHECK(VariableOffset<Ts...>(n, m, k) == Offset, "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(n);
+    DoNotOptimize(m);
+    DoNotOptimize(k);
+    DoNotOptimize(VariableOffset<Ts...>(n, m, k));
+  }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetVariable(benchmark::State& state) {
+  using L = Layout<Ts...>;
+  size_t n = 3;
+  size_t m = 5;
+  size_t k = 7;
+  ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset,
+                 "Invalid offset");
+  for (auto _ : state) {
+    DoNotOptimize(n);
+    DoNotOptimize(m);
+    DoNotOptimize(k);
+    DoNotOptimize(L::Partial(n, m, k).template Offset<3>());
+  }
+}
+
+// Run all benchmarks in two modes:
+//
+//   Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
+//   Layout without padding: Int128[3], int32_t[5], int16_t[7], int8_t[?].
+
+#define OFFSET_BENCHMARK(NAME, OFFSET, T1, T2, T3, T4) \
+  auto& NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4 =  \
+      NAME<OFFSET, T1, T2, T3, T4>;                    \
+  BENCHMARK(NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4)
+
+OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
+                 Int128);
+OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
+                 int8_t);
+OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
+                 Int128);
+OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
+                 int8_t);
+OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/container/internal/layout_test.cc b/third_party/absl/absl/container/internal/layout_test.cc
new file mode 100644
index 000000000..ae55cf7e5
--- /dev/null
+++ b/third_party/absl/absl/container/internal/layout_test.cc
@@ -0,0 +1,1646 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/layout.h"
+
+// We need ::max_align_t because some libstdc++ versions don't provide
+// std::max_align_t
+#include <stddef.h>
+
+#include <cstdint>
+#include <initializer_list>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/log/check.h"
+#include "absl/types/span.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::Span;
+using ::testing::ElementsAre;
+
+size_t Distance(const void* from, const void* to) {
+  CHECK_LE(from, to) << "Distance must be non-negative";
+  return static_cast<const char*>(to) - static_cast<const char*>(from);
+}
+
+template <class Expected, class Actual>
+Expected Type(Actual val) {
+  static_assert(std::is_same<Expected, Actual>(), "");
+  return val;
+}
+
+// Helper classes to test different size and alignments.
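+// (Illustrative note, not from the upstream source: plain int64_t is avoided
+// because on some 32-bit ABIs, e.g. x86 System V, alignof(int64_t) == 4; the
+// alignas(8) wrappers below give every platform the same 8-byte alignment.)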
+struct alignas(8) Int128 { + uint64_t a, b; + friend bool operator==(Int128 lhs, Int128 rhs) { + return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b); + } + + static std::string Name() { + return internal_layout::adl_barrier::TypeName(); + } +}; + +// int64_t is *not* 8-byte aligned on all platforms! +struct alignas(8) Int64 { + int64_t a; + friend bool operator==(Int64 lhs, Int64 rhs) { + return lhs.a == rhs.a; + } +}; + +// Properties of types that this test relies on. +static_assert(sizeof(int8_t) == 1, ""); +static_assert(alignof(int8_t) == 1, ""); +static_assert(sizeof(int16_t) == 2, ""); +static_assert(alignof(int16_t) == 2, ""); +static_assert(sizeof(int32_t) == 4, ""); +static_assert(alignof(int32_t) == 4, ""); +static_assert(sizeof(Int64) == 8, ""); +static_assert(alignof(Int64) == 8, ""); +static_assert(sizeof(Int128) == 16, ""); +static_assert(alignof(Int128) == 8, ""); + +template +void SameType() { + static_assert(std::is_same(), ""); +} + +TEST(Layout, ElementType) { + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } +} + +TEST(Layout, ElementTypes) { + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, + decltype(L::Partial())::ElementTypes>(); + SameType, + decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, + decltype(L::Partial())::ElementTypes>(); + SameType, + decltype(L::Partial(0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0, 0))::ElementTypes>(); + } +} + +TEST(Layout, OffsetByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(0, L(3).Offset<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(12, L::Partial(3).Offset<1>()); + EXPECT_EQ(0, L::Partial(3, 5).Offset<0>()); + EXPECT_EQ(12, L::Partial(3, 5).Offset<1>()); + EXPECT_EQ(0, L(3, 5).Offset<0>()); + EXPECT_EQ(12, L(3, 5).Offset<1>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<1>()); + EXPECT_EQ(0, L::Partial(1).Offset<0>()); + EXPECT_EQ(4, L::Partial(1).Offset<1>()); + EXPECT_EQ(0, L::Partial(5).Offset<0>()); + EXPECT_EQ(8, L::Partial(5).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3).Offset<0>()); + EXPECT_EQ(8, L::Partial(5, 3).Offset<1>()); + EXPECT_EQ(24, L::Partial(5, 3).Offset<2>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0, 
0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>()); + EXPECT_EQ(0, L(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L(5, 3, 1).Offset<1>()); + } +} + +TEST(Layout, OffsetByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(3).Offset()); + EXPECT_EQ(0, L(3).Offset()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(1).Offset()); + EXPECT_EQ(4, L::Partial(1).Offset()); + EXPECT_EQ(0, L::Partial(5).Offset()); + EXPECT_EQ(8, L::Partial(5).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3).Offset()); + EXPECT_EQ(8, L::Partial(5, 3).Offset()); + EXPECT_EQ(24, L::Partial(5, 3).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(0, L(5, 3, 1).Offset()); + EXPECT_EQ(24, L(5, 3, 1).Offset()); + EXPECT_EQ(8, L(5, 3, 1).Offset()); + } +} + +TEST(Layout, Offsets) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0)); + EXPECT_THAT(L(3).Offsets(), ElementsAre(0)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4)); + EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8)); + EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + } +} + +TEST(Layout, AllocSize) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).AllocSize()); + EXPECT_EQ(12, L::Partial(3).AllocSize()); + EXPECT_EQ(12, L(3).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(32, L::Partial(3, 5).AllocSize()); + EXPECT_EQ(32, L(3, 5).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize()); + EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize()); + EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize()); + EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize()); + EXPECT_EQ(136, L(3, 5, 7).AllocSize()); + } +} + +TEST(Layout, 
SizeByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L(3).Size<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L(3, 5).Size<0>()); + EXPECT_EQ(5, L(3, 5).Size<1>()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>()); + EXPECT_EQ(3, L(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L(3, 5, 7).Size<2>()); + } +} + +TEST(Layout, SizeByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size()); + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L(3).Size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L::Partial(3, 5).Size()); + EXPECT_EQ(5, L::Partial(3, 5).Size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(3, L(3, 5, 7).Size()); + EXPECT_EQ(5, L(3, 5, 7).Size()); + EXPECT_EQ(7, L(3, 5, 7).Size()); + } +} + +TEST(Layout, Sizes) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L(3).Sizes(), ElementsAre(3)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + } +} + +TEST(Layout, PointerByIndex) { + alignas(max_align_t) const unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, + Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ( + 12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 
0).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, PointerByType) { + alignas(max_align_t) const unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type( + L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, MutablePointerByIndex) 
{ + alignas(max_align_t) unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, MutablePointerByType) { + alignas(max_align_t) unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, 
Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, Pointers) { + alignas(max_align_t) const unsigned char p[100] = {0}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } +} + +TEST(Layout, MutablePointers) { + alignas(max_align_t) unsigned char p[100] = {0}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } +} + +TEST(Layout, SliceByIndexSize) { + alignas(max_align_t) const unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, 
L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, SliceByTypeSize) { + alignas(max_align_t) const unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, MutableSliceByIndexSize) { + alignas(max_align_t) unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, MutableSliceByTypeSize) { + alignas(max_align_t) unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, SliceByIndexData) { + alignas(max_align_t) const unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance( + p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, 
Distance( + p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, SliceByTypeData) { + alignas(max_align_t) const unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0).Slice(p)) + .data())); + EXPECT_EQ(4, Distance(p, Type>( + L::Partial(1, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(5, 3).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(4, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ(24, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, + Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +TEST(Layout, 
MutableSliceByIndexData) { + alignas(max_align_t) unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ(24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ(8, + Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, MutableSliceByTypeData) { + alignas(max_align_t) unsigned char p[100] = {0}; + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 
0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +MATCHER_P(IsSameSlice, slice, "") { + return arg.size() == slice.size() && arg.data() == slice.data(); +} + +template +class TupleMatcher { + public: + explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {} + + template + bool MatchAndExplain(const Tuple& p, + testing::MatchResultListener* /* listener */) const { + static_assert(std::tuple_size::value == sizeof...(M), ""); + return MatchAndExplainImpl( + p, absl::make_index_sequence::value>{}); + } + + // For the matcher concept. Left empty as we don't really need the diagnostics + // right now. + void DescribeTo(::std::ostream* os) const {} + void DescribeNegationTo(::std::ostream* os) const {} + + private: + template + bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence) const { + // Using std::min as a simple variadic "and". + return std::min( + {true, testing::SafeMatcherCast< + const typename std::tuple_element::type&>( + std::get(matchers_)) + .Matches(std::get(p))...}); + } + + std::tuple matchers_; +}; + +template +testing::PolymorphicMatcher> Tuple(M... 
matchers) { + return testing::MakePolymorphicMatcher( + TupleMatcher(std::move(matchers)...)); +} + +TEST(Layout, Slices) { + alignas(max_align_t) const unsigned char p[100] = {0}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT( + (Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, MutableSlices) { + alignas(max_align_t) unsigned char p[100] = {0}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT((Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type, Span, Span>>( + x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type, Span, Span>>( + x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, UnalignedTypes) { + constexpr Layout x(1, 2, 3); + alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; + EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4)); +} + +TEST(Layout, CustomAlignment) { + constexpr Layout> x(1, 2); + alignas(max_align_t) unsigned char p[x.AllocSize()]; + EXPECT_EQ(10, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8)); +} + +TEST(Layout, OverAligned) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); +#ifdef __GNUC__ + // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357 + __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()]; +#else + alignas(2 * M) unsigned char p[x.AllocSize()]; +#endif + EXPECT_EQ(2 * M + 3, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); +} + +TEST(Layout, Alignment) { + static_assert(Layout::Alignment() == 1, ""); + static_assert(Layout::Alignment() == 4, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout>::Alignment() == 64, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); +} + +TEST(Layout, ConstexprPartial) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); +} +// [from, to) +struct Region { + size_t from; + size_t to; +}; + +void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + for (size_t i = 0; i != n; ++i) { + EXPECT_EQ(poisoned, 
__asan_address_is_poisoned(p + i)); + } +#endif +} + +template +void ExpectPoisoned(const unsigned char (&buf)[N], + std::initializer_list reg) { + size_t prev = 0; + for (const Region& r : reg) { + ExpectRegionPoisoned(buf + prev, r.from - prev, false); + ExpectRegionPoisoned(buf + r.from, r.to - r.from, true); + prev = r.to; + } + ExpectRegionPoisoned(buf + prev, N - prev, false); +} + +TEST(Layout, PoisonPadding) { + using L = Layout; + + constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize(); + { + constexpr auto x = L::Partial(); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {}); + } + { + constexpr auto x = L::Partial(1); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2, 3); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr auto x = L::Partial(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr L x(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } +} + +TEST(Layout, DebugString) { + { + constexpr auto x = Layout::Partial(); + EXPECT_EQ("@0(1)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1); + EXPECT_EQ("@0(1)[1]; @4(4)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2); + EXPECT_EQ("@0(1)[1]; @4(4)[2]; @12(1)", + x.DebugString()); + } + { + constexpr auto x = + Layout::Partial(1, 2, 3); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)", + x.DebugString()); + } + { + constexpr auto x = + Layout::Partial(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } + { + constexpr Layout x(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } +} + +TEST(Layout, CharTypes) { + constexpr Layout x(1); + alignas(max_align_t) char c[x.AllocSize()] = {}; + alignas(max_align_t) unsigned char uc[x.AllocSize()] = {}; + alignas(max_align_t) signed char sc[x.AllocSize()] = {}; + alignas(max_align_t) const char cc[x.AllocSize()] = {}; + alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {}; + alignas(max_align_t) const signed char csc[x.AllocSize()] = {}; + + Type(x.Pointer<0>(c)); + Type(x.Pointer<0>(uc)); + Type(x.Pointer<0>(sc)); + Type(x.Pointer<0>(cc)); + Type(x.Pointer<0>(cuc)); + Type(x.Pointer<0>(csc)); + + Type(x.Pointer(c)); + Type(x.Pointer(uc)); + Type(x.Pointer(sc)); + Type(x.Pointer(cc)); + Type(x.Pointer(cuc)); + Type(x.Pointer(csc)); + + Type>(x.Pointers(c)); + Type>(x.Pointers(uc)); + Type>(x.Pointers(sc)); + Type>(x.Pointers(cc)); + Type>(x.Pointers(cuc)); + Type>(x.Pointers(csc)); + + Type>(x.Slice<0>(c)); + Type>(x.Slice<0>(uc)); + Type>(x.Slice<0>(sc)); + Type>(x.Slice<0>(cc)); + Type>(x.Slice<0>(cuc)); + Type>(x.Slice<0>(csc)); + + Type>>(x.Slices(c)); + 
Type>>(x.Slices(uc)); + Type>>(x.Slices(sc)); + Type>>(x.Slices(cc)); + Type>>(x.Slices(cuc)); + Type>>(x.Slices(csc)); +} + +TEST(Layout, ConstElementType) { + constexpr Layout x(1); + alignas(int32_t) char c[x.AllocSize()] = {}; + const char* cc = c; + const int32_t* p = reinterpret_cast(cc); + + EXPECT_EQ(alignof(int32_t), x.Alignment()); + + EXPECT_EQ(0, x.Offset<0>()); + EXPECT_EQ(0, x.Offset()); + + EXPECT_THAT(x.Offsets(), ElementsAre(0)); + + EXPECT_EQ(1, x.Size<0>()); + EXPECT_EQ(1, x.Size()); + + EXPECT_THAT(x.Sizes(), ElementsAre(1)); + + EXPECT_EQ(sizeof(int32_t), x.AllocSize()); + + EXPECT_EQ(p, Type(x.Pointer<0>(c))); + EXPECT_EQ(p, Type(x.Pointer<0>(cc))); + + EXPECT_EQ(p, Type(x.Pointer(c))); + EXPECT_EQ(p, Type(x.Pointer(cc))); + + EXPECT_THAT(Type>(x.Pointers(c)), Tuple(p)); + EXPECT_THAT(Type>(x.Pointers(cc)), Tuple(p)); + + EXPECT_THAT(Type>(x.Slice<0>(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice<0>(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>(x.Slice(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>>(x.Slices(c)), + Tuple(IsSameSlice(Span(p, 1)))); + EXPECT_THAT(Type>>(x.Slices(cc)), + Tuple(IsSameSlice(Span(p, 1)))); +} + +namespace example { + +// Immutable move-only string with sizeof equal to sizeof(void*). The string +// size and the characters are kept in the same heap allocation. +class CompactString { + public: + CompactString(const char* s = "") { // NOLINT + const size_t size = strlen(s); + // size_t[1], followed by char[size + 1]. + // This statement doesn't allocate memory. + const L layout(1, size + 1); + // AllocSize() tells us how much memory we need to allocate for all our + // data. + p_.reset(new unsigned char[layout.AllocSize()]); + // If running under ASAN, mark the padding bytes, if any, to catch memory + // errors. + layout.PoisonPadding(p_.get()); + // Store the size in the allocation. + // Pointer() is a synonym for Pointer<0>(). + *layout.Pointer(p_.get()) = size; + // Store the characters in the allocation. + memcpy(layout.Pointer(p_.get()), s, size + 1); + } + + size_t size() const { + // Equivalent to reinterpret_cast(*p). + return *L::Partial().Pointer(p_.get()); + } + + const char* c_str() const { + // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). + // The argument in Partial(1) specifies that we have size_t[1] in front of + // the characters. + return L::Partial(1).Pointer(p_.get()); + } + + private: + // Our heap allocation contains a size_t followed by an array of chars. + using L = Layout; + std::unique_ptr p_; +}; + +TEST(CompactString, Works) { + CompactString s = "hello"; + EXPECT_EQ(5, s.size()); + EXPECT_STREQ("hello", s.c_str()); +} + +} // namespace example + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/node_slot_policy.h b/third_party/absl/absl/container/internal/node_slot_policy.h new file mode 100644 index 000000000..3f1874d4e --- /dev/null +++ b/third_party/absl/absl/container/internal/node_slot_policy.h @@ -0,0 +1,95 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Adapts a policy for nodes. +// +// The node policy should model: +// +// struct Policy { +// // Returns a new node allocated and constructed using the allocator, using +// // the specified arguments. +// template +// value_type* new_element(Alloc* alloc, Args&&... args) const; +// +// // Destroys and deallocates node using the allocator. +// template +// void delete_element(Alloc* alloc, value_type* node) const; +// }; +// +// It may also optionally define `value()` and `apply()`. For documentation on +// these, see hash_policy_traits.h. + +#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ +#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct node_slot_policy { + static_assert(std::is_lvalue_reference::value, ""); + + using slot_type = typename std::remove_cv< + typename std::remove_reference::type>::type*; + + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + *slot = Policy::new_element(alloc, std::forward(args)...); + } + + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::delete_element(alloc, *slot); + } + + // Returns true_type to indicate that transfer can use memcpy. + template + static std::true_type transfer(Alloc*, slot_type* new_slot, + slot_type* old_slot) { + *new_slot = *old_slot; + return {}; + } + + static size_t space_used(const slot_type* slot) { + if (slot == nullptr) return Policy::element_space_used(nullptr); + return Policy::element_space_used(*slot); + } + + static Reference element(slot_type* slot) { return **slot; } + + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + template + static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) { + return P::apply(std::forward(ts)...); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ diff --git a/third_party/absl/absl/container/internal/node_slot_policy_test.cc b/third_party/absl/absl/container/internal/node_slot_policy_test.cc new file mode 100644 index 000000000..d4ea919d2 --- /dev/null +++ b/third_party/absl/absl/container/internal/node_slot_policy_test.cc @@ -0,0 +1,71 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
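//
// (Editorial sketch: the test below exercises the node-policy contract that
// node_slot_policy.h documents above. Assuming only `new_element` and
// `delete_element` are required, a stripped-down standalone policy would look
// like this; `IntNodePolicy` is illustrative, not part of Abseil:
//
//   struct IntNodePolicy {
//     using value_type = int;
//     template <class Alloc, class... Args>
//     static int* new_element(Alloc*, Args&&... args) {
//       return new int(std::forward<Args>(args)...);  // allocate + construct
//     }
//     template <class Alloc>
//     static void delete_element(Alloc*, int* node) { delete node; }
//   };
//
// node_slot_policy stores one such pointer per slot, so `transfer` is a raw
// pointer copy; that is why the test below expects transfer_uses_memcpy() to
// be true.)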
+ +#include "absl/container/internal/node_slot_policy.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/container/internal/hash_policy_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::Pointee; + +struct Policy : node_slot_policy { + using key_type = int; + using init_type = int; + + template + static int* new_element(Alloc* alloc, int value) { + return new int(value); + } + + template + static void delete_element(Alloc* alloc, int* elem) { + delete elem; + } +}; + +using NodePolicy = hash_policy_traits; + +struct NodeTest : ::testing::Test { + std::allocator alloc; + int n = 53; + int* a = &n; +}; + +TEST_F(NodeTest, ConstructDestroy) { + NodePolicy::construct(&alloc, &a, 42); + EXPECT_THAT(a, Pointee(42)); + NodePolicy::destroy(&alloc, &a); +} + +TEST_F(NodeTest, transfer) { + int s = 42; + int* b = &s; + NodePolicy::transfer(&alloc, &a, &b); + EXPECT_EQ(&s, a); + EXPECT_TRUE(NodePolicy::transfer_uses_memcpy()); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/raw_hash_map.h b/third_party/absl/absl/container/internal/raw_hash_map.h new file mode 100644 index 000000000..97182bc7b --- /dev/null +++ b/third_party/absl/absl/container/internal/raw_hash_map.h @@ -0,0 +1,224 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class raw_hash_map : public raw_hash_set { + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. + template + using MappedReference = decltype(P::value( + std::addressof(std::declval()))); + + // MappedConstReference<> may be a non-reference type. + template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()))); + + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + + // TODO(b/187807849): Evaluate whether to support reference mapped_type and + // remove this assertion if/when it is supported. 
+ static_assert(!std::is_reference::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() {} + using raw_hash_map::raw_hash_set::raw_hash_set; + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, + V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, + const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, + V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, + const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert_or_assign(k, v).first; + } + + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + template ::value, int>::type = 0, + K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template ::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, + Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, + Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference
<P>
at(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto it = this->find(key); + if (it == this->end()) { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at"); + } + return Policy::value(&*it); + } + + template + MappedConstReference
<P>
at(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto it = this->find(key); + if (it == this->end()) { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at"); + } + return Policy::value(&*it); + } + + template + MappedReference
<P>
operator[](key_arg&& key) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + // It is safe to use unchecked_deref here because try_emplace + // will always return an iterator pointing to a valid item in the table, + // since it inserts if nothing is found for the given key. + return Policy::value( + &this->unchecked_deref(try_emplace(std::forward(key)).first)); + } + + template + MappedReference
<P>
operator[](const key_arg& key) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + // It is safe to use unchecked_deref here because try_emplace + // will always return an iterator pointing to a valid item in the table, + // since it inserts if nothing is found for the given key. + return Policy::value(&this->unchecked_deref(try_emplace(key).first)); + } + + private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward(k), std::forward(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward(v); + return {this->iterator_at(res.first), res.second}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(res.first), res.second}; + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ diff --git a/third_party/absl/absl/container/internal/raw_hash_set.cc b/third_party/absl/absl/container/internal/raw_hash_set.cc new file mode 100644 index 000000000..9f8ea5198 --- /dev/null +++ b/third_party/absl/absl/container/internal/raw_hash_set.cc @@ -0,0 +1,380 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/raw_hash_set.h" + +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/container/internal/container_memory.h" +#include "absl/hash/hash.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// We have space for `growth_left` before a single block of control bytes. A +// single block of empty control bytes for tables without any slots allocated. +// This enables removing a branch in the hot path of find(). In order to ensure +// that the control bytes are aligned to 16, we have 16 bytes before the control +// bytes even though growth_left only needs 8. 
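//
// (Editorial sketch of the 32-byte array defined just below, assuming
// Group::kWidth == 16:
//
//   kEmptyGroup: [ 16 zero bytes | kSentinel | 15 x kEmpty ]
//                  ^ room for `growth_left`;   ^ the group an empty table's
//                    keeps ctrl 16-aligned       control pointer points at
//
// An empty table's control pointer targets offset 16, so find() scans a
// sentinel-terminated group and stops there, with no special empty-table
// branch in the hot path.)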
+constexpr ctrl_t ZeroCtrlT() { return static_cast(0); } +alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = { + ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), + ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), + ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), + ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), + ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr size_t Group::kWidth; +#endif + +namespace { + +// Returns "random" seed. +inline size_t RandomSeed() { +#ifdef ABSL_HAVE_THREAD_LOCAL + static thread_local size_t counter = 0; + // On Linux kernels >= 5.4 the MSAN runtime has a false-positive when + // accessing thread local storage data from loaded libraries + // (https://github.com/google/sanitizers/issues/1265), for this reason counter + // needs to be annotated as initialized. + ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t)); + size_t value = ++counter; +#else // ABSL_HAVE_THREAD_LOCAL + static std::atomic counter(0); + size_t value = counter.fetch_add(1, std::memory_order_relaxed); +#endif // ABSL_HAVE_THREAD_LOCAL + return value ^ static_cast(reinterpret_cast(&counter)); +} + +bool ShouldRehashForBugDetection(const ctrl_t* ctrl, size_t capacity) { + // Note: we can't use the abseil-random library because abseil-random + // depends on swisstable. We want to return true with probability + // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this, + // we probe based on a random hash and see if the offset is less than + // RehashProbabilityConstant(). + return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() < + RehashProbabilityConstant(); +} + +} // namespace + +GenerationType* EmptyGeneration() { + if (SwisstableGenerationsEnabled()) { + constexpr size_t kNumEmptyGenerations = 1024; + static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{}; + return const_cast( + &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]); + } + return nullptr; +} + +bool CommonFieldsGenerationInfoEnabled:: + should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl, + size_t capacity) const { + if (reserved_growth_ == kReservedGrowthJustRanOut) return true; + if (reserved_growth_ > 0) return false; + return ShouldRehashForBugDetection(ctrl, capacity); +} + +bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move( + const ctrl_t* ctrl, size_t capacity) const { + return ShouldRehashForBugDetection(ctrl, capacity); +} + +bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) { + // To avoid problems with weak hashes and single bit tests, we use % 13. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; +} + +void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) { + assert(ctrl[capacity] == ctrl_t::kSentinel); + assert(IsValidCapacity(capacity)); + for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) { + Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); + } + // Copy the cloned ctrl bytes. + std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes()); + ctrl[capacity] = ctrl_t::kSentinel; +} +// Extern template instantiation for inline function. 
+template FindInfo find_first_non_full(const CommonFields&, size_t); + +FindInfo find_first_non_full_outofline(const CommonFields& common, + size_t hash) { + return find_first_non_full(common, hash); +} + +// Returns the address of the slot just after slot assuming each slot has the +// specified size. +static inline void* NextSlot(void* slot, size_t slot_size) { + return reinterpret_cast(reinterpret_cast(slot) + slot_size); +} + +// Returns the address of the slot just before slot assuming each slot has the +// specified size. +static inline void* PrevSlot(void* slot, size_t slot_size) { + return reinterpret_cast(reinterpret_cast(slot) - slot_size); +} + +void DropDeletesWithoutResize(CommonFields& common, + const PolicyFunctions& policy, void* tmp_space) { + void* set = &common; + void* slot_array = common.slot_array(); + const size_t capacity = common.capacity(); + assert(IsValidCapacity(capacity)); + assert(!is_small(capacity)); + // Algorithm: + // - mark all DELETED slots as EMPTY + // - mark all FULL slots as DELETED + // - for each slot marked as DELETED + // hash = Hash(element) + // target = find_first_non_full(hash) + // if target is in the same group + // mark slot as FULL + // else if target is EMPTY + // transfer element to target + // mark slot as EMPTY + // mark target as FULL + // else if target is DELETED + // swap current element with target element + // mark target as FULL + // repeat procedure for current slot with moved from element (target) + ctrl_t* ctrl = common.control(); + ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity); + auto hasher = policy.hash_slot; + auto transfer = policy.transfer; + const size_t slot_size = policy.slot_size; + + size_t total_probe_length = 0; + void* slot_ptr = SlotAddress(slot_array, 0, slot_size); + for (size_t i = 0; i != capacity; + ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) { + assert(slot_ptr == SlotAddress(slot_array, i, slot_size)); + if (!IsDeleted(ctrl[i])) continue; + const size_t hash = (*hasher)(set, slot_ptr); + const FindInfo target = find_first_non_full(common, hash); + const size_t new_i = target.offset; + total_probe_length += target.probe_length; + + // Verify if the old and new i fall within the same group wrt the hash. + // If they do, we don't need to move the object as it falls already in the + // best probe we can. + const size_t probe_offset = probe(common, hash).offset(); + const auto probe_index = [probe_offset, capacity](size_t pos) { + return ((pos - probe_offset) & capacity) / Group::kWidth; + }; + + // Element doesn't move. + if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { + SetCtrl(common, i, H2(hash), slot_size); + continue; + } + + void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size); + if (IsEmpty(ctrl[new_i])) { + // Transfer element to the empty spot. + // SetCtrl poisons/unpoisons the slots so we have to call it at the + // right time. + SetCtrl(common, new_i, H2(hash), slot_size); + (*transfer)(set, new_slot_ptr, slot_ptr); + SetCtrl(common, i, ctrl_t::kEmpty, slot_size); + } else { + assert(IsDeleted(ctrl[new_i])); + SetCtrl(common, new_i, H2(hash), slot_size); + // Until we are done rehashing, DELETED marks previously FULL slots. + + // Swap i and new_i elements. 
+ (*transfer)(set, tmp_space, new_slot_ptr); + (*transfer)(set, new_slot_ptr, slot_ptr); + (*transfer)(set, slot_ptr, tmp_space); + + // repeat the processing of the ith slot + --i; + slot_ptr = PrevSlot(slot_ptr, slot_size); + } + } + ResetGrowthLeft(common); + common.infoz().RecordRehash(total_probe_length); +} + +static bool WasNeverFull(CommonFields& c, size_t index) { + if (is_single_group(c.capacity())) { + return true; + } + const size_t index_before = (index - Group::kWidth) & c.capacity(); + const auto empty_after = Group(c.control() + index).MaskEmpty(); + const auto empty_before = Group(c.control() + index_before).MaskEmpty(); + + // We count how many consecutive non empties we have to the right and to the + // left of `it`. If the sum is >= kWidth then there is at least one probe + // window that might have seen a full group. + return empty_before && empty_after && + static_cast(empty_after.TrailingZeros()) + + empty_before.LeadingZeros() < + Group::kWidth; +} + +void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) { + assert(IsFull(c.control()[index]) && "erasing a dangling iterator"); + c.decrement_size(); + c.infoz().RecordErase(); + + if (WasNeverFull(c, index)) { + SetCtrl(c, index, ctrl_t::kEmpty, slot_size); + c.set_growth_left(c.growth_left() + 1); + return; + } + + SetCtrl(c, index, ctrl_t::kDeleted, slot_size); +} + +void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, + bool reuse) { + c.set_size(0); + if (reuse) { + ResetCtrl(c, policy.slot_size); + ResetGrowthLeft(c); + c.infoz().RecordStorageChanged(0, c.capacity()); + } else { + // We need to record infoz before calling dealloc, which will unregister + // infoz. + c.infoz().RecordClearedReservation(); + c.infoz().RecordStorageChanged(0, 0); + (*policy.dealloc)(c, policy); + c.set_control(EmptyGroup()); + c.set_generation_ptr(EmptyGeneration()); + c.set_slots(nullptr); + c.set_capacity(0); + } +} + +void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes( + ctrl_t* new_ctrl, size_t new_capacity) const { + assert(is_single_group(new_capacity)); + constexpr size_t kHalfWidth = Group::kWidth / 2; + assert(old_capacity_ < kHalfWidth); + + const size_t half_old_capacity = old_capacity_ / 2; + + // NOTE: operations are done with compile time known size = kHalfWidth. + // Compiler optimizes that into single ASM operation. + + // Copy second half of bytes to the beginning. + // We potentially copy more bytes in order to have compile time known size. + // Mirrored bytes from the old_ctrl_ will also be copied. + // In case of old_capacity_ == 3, we will copy 1st element twice. + // Examples: + // old_ctrl = 0S0EEEEEEE... + // new_ctrl = S0EEEEEEEE... + // + // old_ctrl = 01S01EEEEE... + // new_ctrl = 1S01EEEEEE... + // + // old_ctrl = 0123456S0123456EE... + // new_ctrl = 456S0123?????????... + std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth); + // Clean up copied kSentinel from old_ctrl. + new_ctrl[half_old_capacity] = ctrl_t::kEmpty; + + // Clean up damaged or uninitialized bytes. + + // Clean bytes after the intended size of the copy. + // Example: + // new_ctrl = 1E01EEEEEEE???? + // *new_ctrl= 1E0EEEEEEEE???? + // position / + std::memset(new_ctrl + old_capacity_ + 1, static_cast(ctrl_t::kEmpty), + kHalfWidth); + // Clean non-mirrored bytes that are not initialized. + // For small old_capacity that may be inside of mirrored bytes zone. + // Examples: + // new_ctrl = 1E0EEEEEEEE??????????.... + // *new_ctrl= 1E0EEEEEEEEEEEEE?????.... 
+ // position / + // + // new_ctrl = 456E0123???????????... + // *new_ctrl= 456E0123EEEEEEEE???... + // position / + std::memset(new_ctrl + kHalfWidth, static_cast(ctrl_t::kEmpty), + kHalfWidth); + // Clean last mirrored bytes that are not initialized + // and will not be overwritten by mirroring. + // Examples: + // new_ctrl = 1E0EEEEEEEEEEEEE???????? + // *new_ctrl= 1E0EEEEEEEEEEEEEEEEEEEEE + // position S / + // + // new_ctrl = 456E0123EEEEEEEE??????????????? + // *new_ctrl= 456E0123EEEEEEEE???????EEEEEEEE + // position S / + std::memset(new_ctrl + new_capacity + kHalfWidth, + static_cast(ctrl_t::kEmpty), kHalfWidth); + + // Create mirrored bytes. old_capacity_ < kHalfWidth + // Example: + // new_ctrl = 456E0123EEEEEEEE???????EEEEEEEE + // *new_ctrl= 456E0123EEEEEEEE456E0123EEEEEEE + // position S/ + ctrl_t g[kHalfWidth]; + std::memcpy(g, new_ctrl, kHalfWidth); + std::memcpy(new_ctrl + new_capacity + 1, g, kHalfWidth); + + // Finally set sentinel to its place. + new_ctrl[new_capacity] = ctrl_t::kSentinel; +} + +void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots( + void* old_slots, void* new_slots, size_t slot_size) const { + assert(old_capacity_ > 0); + const size_t half_old_capacity = old_capacity_ / 2; + + SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_); + std::memcpy(new_slots, + SlotAddress(old_slots, half_old_capacity + 1, slot_size), + slot_size * half_old_capacity); + std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size), + old_slots, slot_size * (half_old_capacity + 1)); +} + +void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable( + CommonFields& c, void* old_slots, size_t slot_size) { + assert(old_capacity_ < Group::kWidth / 2); + assert(is_single_group(c.capacity())); + assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity())); + + GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity()); + GrowIntoSingleGroupShuffleTransferableSlots(old_slots, c.slot_array(), + slot_size); + + // We poison since GrowIntoSingleGroupShuffleTransferableSlots + // may leave empty slots unpoisoned. + PoisonSingleGroupEmptySlots(c, slot_size); +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/third_party/absl/absl/container/internal/raw_hash_set.h b/third_party/absl/absl/container/internal/raw_hash_set.h new file mode 100644 index 000000000..3518bc340 --- /dev/null +++ b/third_party/absl/absl/container/internal/raw_hash_set.h @@ -0,0 +1,3325 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// An open-addressing +// hashtable with quadratic probing. +// +// This is a low level hashtable on top of which different interfaces can be +// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. +// +// The table interface is similar to that of std::unordered_set. Notable +// differences are that most member functions support heterogeneous keys when +// BOTH the hash and eq functions are marked as transparent. 
They do so by +// providing a typedef called `is_transparent`. +// +// When heterogeneous lookup is enabled, functions that take key_type act as if +// they have an overload set like: +// +// iterator find(const key_type& key); +// template +// iterator find(const K& key); +// +// size_type erase(const key_type& key); +// template +// size_type erase(const K& key); +// +// std::pair equal_range(const key_type& key); +// template +// std::pair equal_range(const K& key); +// +// When heterogeneous lookup is disabled, only the explicit `key_type` overloads +// exist. +// +// find() also supports passing the hash explicitly: +// +// iterator find(const key_type& key, size_t hash); +// template +// iterator find(const U& key, size_t hash); +// +// In addition the pointer to element and iterator stability guarantees are +// weaker: all iterators and pointers are invalidated after a new element is +// inserted. +// +// IMPLEMENTATION DETAILS +// +// # Table Layout +// +// A raw_hash_set's backing array consists of control bytes followed by slots +// that may or may not contain objects. +// +// The layout of the backing array, for `capacity` slots, is thus, as a +// pseudo-struct: +// +// struct BackingArray { +// // Sampling handler. This field isn't present when the sampling is +// // disabled or this allocation hasn't been selected for sampling. +// HashtablezInfoHandle infoz_; +// // The number of elements we can insert before growing the capacity. +// size_t growth_left; +// // Control bytes for the "real" slots. +// ctrl_t ctrl[capacity]; +// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to +// // stop and serves no other purpose. +// ctrl_t sentinel; +// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so +// // that if a probe sequence picks a value near the end of `ctrl`, +// // `Group` will have valid control bytes to look at. +// ctrl_t clones[kWidth - 1]; +// // The actual slot data. +// slot_type slots[capacity]; +// }; +// +// The length of this array is computed by `AllocSize()` below. +// +// Control bytes (`ctrl_t`) are bytes (collected into groups of a +// platform-specific size) that define the state of the corresponding slot in +// the slot array. Group manipulation is tightly optimized to be as efficient +// as possible: SSE and friends on x86, clever bit operations on other arches. +// +// Group 1 Group 2 Group 3 +// +---------------+---------------+---------------+ +// | | | | | | | | | | | | | | | | | | | | | | | | | +// +---------------+---------------+---------------+ +// +// Each control byte is either a special value for empty slots, deleted slots +// (sometimes called *tombstones*), and a special end-of-table marker used by +// iterators, or, if occupied, seven bits (H2) from the hash of the value in the +// corresponding slot. +// +// Storing control bytes in a separate array also has beneficial cache effects, +// since more logical slots will fit into a cache line. +// +// # Hashing +// +// We compute two separate hashes, `H1` and `H2`, from the hash of an object. +// `H1(hash(x))` is an index into `slots`, and essentially the starting point +// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out +// objects that cannot possibly be the one we are looking for. +// +// # Table operations. +// +// The key operations are `insert`, `find`, and `erase`. +// +// Since `insert` and `erase` are implemented in terms of `find`, we describe +// `find` first. To `find` a value `x`, we compute `hash(x)`. 
From +// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every +// group of slots in some interesting order. +// +// We now walk through these indices. At each index, we select the entire group +// starting with that index and extract potential candidates: occupied slots +// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the +// group, we stop and return an error. Each candidate slot `y` is compared with +// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the +// next probe index. Tombstones effectively behave like full slots that never +// match the value we're looking for. +// +// The `H2` bits ensure that when we compare a slot to an object with `==`, we are +// likely to have actually found the object. That is, the chance is low that +// `==` is called and returns `false`. Thus, when we search for an object, we +// are unlikely to call `==` many times. This likelihood can be analyzed as +// follows (assuming that H2 is a random enough hash function). +// +// Let's assume that there are `k` "wrong" objects that must be examined in a +// probe sequence. For example, when doing a `find` on an object that is in the +// table, `k` is the number of objects between the start of the probe sequence +// and the final found object (not including the final found object). The +// expected number of objects with an H2 match is then `k/128`. Measurements +// and analysis indicate that even at high load factors, `k` is less than 32, +// meaning that the number of "false positive" comparisons we must perform is +// less than 1/4 per `find`. + +// `insert` is implemented in terms of `unchecked_insert`, which inserts a +// value presumed to not be in the table (violating this requirement will cause +// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert +// it, we construct a `probe_seq` once again, and use it to find the first +// group with an unoccupied (empty *or* deleted) slot. We place `x` into the +// first such slot in the group and mark it as full with `x`'s H2. +// +// To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and +// perform a `find` to see if it's already present; if it is, we're done. If +// it's not, we may decide the table is getting overcrowded (i.e., the load +// factor is greater than 7/8 for big tables; `is_small()` tables use a max load +// factor of 1); in this case, we allocate a bigger array, `unchecked_insert` +// each element of the table into the new array (we know that no insertion here +// will insert an already-present value), and discard the old backing array. At +// this point, we may `unchecked_insert` the value `x`. +// +// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which +// presents a viable, initialized slot pointee to the caller. +// +// `erase` is implemented in terms of `erase_at`, which takes an index to a +// slot. Given an offset, we simply create a tombstone and destroy its contents. +// If we can prove that the slot would not appear in a probe sequence, we can +// mark the slot as empty, instead. We can prove this by observing that if a +// group has any empty slots, it has never been full (assuming we never create +// an empty slot in a group with no empties, which this heuristic guarantees we +// never do) and find would stop at this group anyway (since it does not probe +// beyond groups with empties). 
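//
// (Editorial sketch of the erase decision just described, in illustrative
// pseudo-code only; the real implementation also inspects the neighboring
// groups around the slot, see WasNeverFull in raw_hash_set.cc:
//
//   void erase_at(size_t i) {
//     destroy(slot[i]);
//     if (group_of(i).has_empty_slot()) {
//       ctrl[i] = kEmpty;    // group was never full; find() stops here anyway
//     } else {
//       ctrl[i] = kDeleted;  // tombstone: probe sequences must keep going
//     }
//   }
// )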
+//
+// `erase` is `erase_at` composed with `find`: if we have a value `x`, we can
+// perform a `find`, and then `erase_at` the resulting slot.
+//
+// To iterate, we simply traverse the array, skipping empty and deleted slots
+// and stopping when we hit a `kSentinel`.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/options.h"
+#include "absl/base/port.h"
+#include "absl/base/prefetch.h"
+#include "absl/container/internal/common.h"  // IWYU pragma: export  // for node_handle
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_policy_traits.h"
+#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
+#include "absl/utility/utility.h"
+
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#include <arm_neon.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+    defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
+    defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the backing
+// array and iterators. In the backing array, we store the generation between
+// the control bytes and the slots. When iterators are dereferenced, we assert
+// that the container has not been mutated in a way that could cause iterator
+// invalidation since the iterator was initialized.
+#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#endif
+
+// We use uint8_t so we don't need to worry about padding.
+using GenerationType = uint8_t;
+
+// A sentinel value for empty generations. Using 0 makes it easy to constexpr
+// initialize an array of this value.
+constexpr GenerationType SentinelEmptyGeneration() { return 0; }
+
+constexpr GenerationType NextGeneration(GenerationType generation) {
+  return ++generation == SentinelEmptyGeneration() ?
++generation : generation; +} + +#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS +constexpr bool SwisstableGenerationsEnabled() { return true; } +constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); } +#else +constexpr bool SwisstableGenerationsEnabled() { return false; } +constexpr size_t NumGenerationBytes() { return 0; } +#endif + +template +void SwapAlloc(AllocType& lhs, AllocType& rhs, + std::true_type /* propagate_on_container_swap */) { + using std::swap; + swap(lhs, rhs); +} +template +void SwapAlloc(AllocType& lhs, AllocType& rhs, + std::false_type /* propagate_on_container_swap */) { + (void)lhs; + (void)rhs; + assert(lhs == rhs && + "It's UB to call swap with unequal non-propagating allocators."); +} + +template +void CopyAlloc(AllocType& lhs, AllocType& rhs, + std::true_type /* propagate_alloc */) { + lhs = rhs; +} +template +void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {} + +// The state for a probe sequence. +// +// Currently, the sequence is a triangular progression of the form +// +// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) +// +// The use of `Width` ensures that each probe step does not overlap groups; +// the sequence effectively outputs the addresses of *groups* (although not +// necessarily aligned to any boundary). The `Group` machinery allows us +// to check an entire group with minimal branching. +// +// Wrapping around at `mask + 1` is important, but not for the obvious reason. +// As described above, the first few entries of the control byte array +// are mirrored at the end of the array, which `Group` will find and use +// for selecting candidates. However, when those candidates' slots are +// actually inspected, there are no corresponding slots for the cloned bytes, +// so we need to make sure we've treated those offsets as "wrapping around". +// +// It turns out that this probe sequence visits every group exactly once if the +// number of groups is a power of two, since (i^2+i)/2 is a bijection in +// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing +template +class probe_seq { + public: + // Creates a new probe sequence using `hash` as the initial value of the + // sequence and `mask` (usually the capacity of the table) as the mask to + // apply to each value in the progression. + probe_seq(size_t hash, size_t mask) { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + + // The offset within the table, i.e., the value `p(i)` above. + size_t offset() const { return offset_; } + size_t offset(size_t i) const { return (offset_ + i) & mask_; } + + void next() { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index, a multiple of `Width`. + size_t index() const { return index_; } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; +}; + +template +struct RequireUsableKey { + template + std::pair< + decltype(std::declval()(std::declval())), + decltype(std::declval()(std::declval(), + std::declval()))>* + operator()(const PassedKey&, const Args&...) const; +}; + +template +struct IsDecomposable : std::false_type {}; + +template +struct IsDecomposable< + absl::void_t(), + std::declval()...))>, + Policy, Hash, Eq, Ts...> : std::true_type {}; + +// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. 
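As a concreteness check, the triangular progression that `probe_seq` implements can be walked by hand. The sketch below is standalone and hard-codes `Width` 16, capacity 63, and an arbitrary hash of 27 for illustration; the swappability helper that the TODO above refers to follows right after it.

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kWidth = 16;   // slots per group
  const size_t mask = 63;     // capacity 63, i.e. 64 slots
  size_t offset = 27 & mask;  // p(0) for hash == 27
  size_t index = 0;
  for (int i = 0; i < 4; ++i) {
    std::printf("probe %d -> group starting at offset %zu\n", i, offset);
    index += kWidth;                   // index == Width * (i + 1)
    offset = (offset + index) & mask;  // p(i + 1)
  }
  // Prints offsets 27, 43, 11, 59: four distinct start points, all congruent
  // mod 16, so the sequence covers each of the four group positions exactly
  // once before the cycle repeats, as the bijection argument above promises.
}
```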
+template +constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { + using std::swap; + return noexcept(swap(std::declval(), std::declval())); +} +template +constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { + return false; +} + +template +uint32_t TrailingZeros(T x) { + ABSL_ASSUME(x != 0); + return static_cast(countr_zero(x)); +} + +// An abstract bitmask, such as that emitted by a SIMD instruction. +// +// Specifically, this type implements a simple bitset whose representation is +// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number +// of abstract bits in the bitset, while `Shift` is the log-base-two of the +// width of an abstract bit in the representation. +// This mask provides operations for any number of real bits set in an abstract +// bit. To add iteration on top of that, implementation must guarantee no more +// than the most significant real bit is set in a set abstract bit. +template +class NonIterableBitMask { + public: + explicit NonIterableBitMask(T mask) : mask_(mask) {} + + explicit operator bool() const { return this->mask_ != 0; } + + // Returns the index of the lowest *abstract* bit set in `self`. + uint32_t LowestBitSet() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Returns the index of the highest *abstract* bit set in `self`. + uint32_t HighestBitSet() const { + return static_cast((bit_width(mask_) - 1) >> Shift); + } + + // Returns the number of trailing zero *abstract* bits. + uint32_t TrailingZeros() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Returns the number of leading zero *abstract* bits. + uint32_t LeadingZeros() const { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return static_cast( + countl_zero(static_cast(mask_ << extra_bits))) >> + Shift; + } + + T mask_; +}; + +// Mask that can be iterable +// +// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just +// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When +// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as +// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. +// +// For example: +// for (int i : BitMask(0b101)) -> yields 0, 2 +// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 +template +class BitMask : public NonIterableBitMask { + using Base = NonIterableBitMask; + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + explicit BitMask(T mask) : Base(mask) {} + // BitMask is an iterator over the indices of its abstract bits. + using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + BitMask& operator++() { + if (Shift == 3) { + constexpr uint64_t msbs = 0x8080808080808080ULL; + this->mask_ &= msbs; + } + this->mask_ &= (this->mask_ - 1); + return *this; + } + + uint32_t operator*() const { return Base::LowestBitSet(); } + + BitMask begin() const { return *this; } + BitMask end() const { return BitMask(0); } + + private: + friend bool operator==(const BitMask& a, const BitMask& b) { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) { + return a.mask_ != b.mask_; + } +}; + +using h2_t = uint8_t; + +// The values here are selected for maximum performance. See the static asserts +// below for details. 
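To make the iteration contract above concrete before turning to the control-byte values that the note above refers to, here is a standalone sketch reproducing the two documented examples; `IterateMask` is an invented name that mimics what `BitMask`'s `operator++` and `operator*` do together.

```cpp
#include <bit>  // std::countr_zero (C++20)
#include <cstdint>
#include <vector>

// Yields abstract-bit indices, lowest first. Assumes that in the Shift == 3
// representation only the high bit of each byte is set, which is what the
// class above guarantees before iteration.
std::vector<int> IterateMask(uint64_t mask, int shift) {
  std::vector<int> out;
  while (mask != 0) {
    out.push_back(std::countr_zero(mask) >> shift);  // LowestBitSet()
    mask &= mask - 1;  // clear the lowest set real bit
  }
  return out;
}

// IterateMask(0b101, /*shift=*/0)              yields {0, 2}
// IterateMask(0x0000000080800000, /*shift=*/3) yields {2, 3}
```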
+ +// A `ctrl_t` is a single control byte, which can have one of four +// states: empty, deleted, full (which has an associated seven-bit h2_t value) +// and the sentinel. They have the following bit patterns: +// +// empty: 1 0 0 0 0 0 0 0 +// deleted: 1 1 1 1 1 1 1 0 +// full: 0 h h h h h h h // h represents the hash bits. +// sentinel: 1 1 1 1 1 1 1 1 +// +// These values are specifically tuned for SSE-flavored SIMD. +// The static_asserts below detail the source of these choices. +// +// We use an enum class so that when strict aliasing is enabled, the compiler +// knows ctrl_t doesn't alias other types. +enum class ctrl_t : int8_t { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 +}; +static_assert( + (static_cast(ctrl_t::kEmpty) & + static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x80) != 0, + "Special markers need to have the MSB to make checking for them efficient"); +static_assert( + ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, + "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " + "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); +static_assert( + ctrl_t::kSentinel == static_cast(-1), + "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(ctrl_t::kEmpty == static_cast(-128), + "ctrl_t::kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); +static_assert( + (~static_cast(ctrl_t::kEmpty) & + ~static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x7F) != 0, + "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " + "shared by ctrl_t::kSentinel to make the scalar test for " + "MaskEmptyOrDeleted() efficient"); +static_assert(ctrl_t::kDeleted == static_cast(-2), + "ctrl_t::kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + +// See definition comment for why this is size 32. +ABSL_DLL extern const ctrl_t kEmptyGroup[32]; + +// Returns a pointer to a control byte group that can be used by empty tables. +inline ctrl_t* EmptyGroup() { + // Const must be cast away here; no uses of this function will actually write + // to it, because it is only used for empty tables. + return const_cast(kEmptyGroup + 16); +} + +// Returns a pointer to a generation to use for an empty hashtable. +GenerationType* EmptyGeneration(); + +// Returns whether `generation` is a generation for an empty hashtable that +// could be returned by EmptyGeneration(). +inline bool IsEmptyGeneration(const GenerationType* generation) { + return *generation == SentinelEmptyGeneration(); +} + +// Mixes a randomly generated per-process seed with `hash` and `ctrl` to +// randomize insertion order within groups. +bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); + +// Returns a per-table, hash salt, which changes on resize. This gets mixed into +// H1 to randomize iteration order per-table. +// +// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure +// non-determinism of iteration order in most cases. +inline size_t PerTableSalt(const ctrl_t* ctrl) { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. 
+ return reinterpret_cast(ctrl) >> 12; +} +// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. +inline size_t H1(size_t hash, const ctrl_t* ctrl) { + return (hash >> 7) ^ PerTableSalt(ctrl); +} + +// Extracts the H2 portion of a hash: the 7 bits not used for H1. +// +// These are used as an occupied control byte. +inline h2_t H2(size_t hash) { return hash & 0x7F; } + +// Helpers for checking the state of a control byte. +inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } +inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } + +#ifdef ABSL_INTERNAL_HAVE_SSE2 +// Quick reference guide for intrinsics used below: +// +// * __m128i: An XMM (128-bit) word. +// +// * _mm_setzero_si128: Returns a zero vector. +// * _mm_set1_epi8: Returns a vector with the same i8 in each lane. +// +// * _mm_subs_epi8: Saturating-subtracts two i8 vectors. +// * _mm_and_si128: Ands two i128s together. +// * _mm_or_si128: Ors two i128s together. +// * _mm_andnot_si128: And-nots two i128s together. +// +// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, +// filling each lane with 0x00 or 0xff. +// * _mm_cmpgt_epi8: Same as above, but using > rather than ==. +// +// * _mm_loadu_si128: Performs an unaligned load of an i128. +// * _mm_storeu_si128: Performs an unaligned store of an i128. +// +// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first +// argument if the corresponding lane of the second +// argument is positive, negative, or zero, respectively. +// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a +// bitmask consisting of those bits. +// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low +// four bits of each i8 lane in the second argument as +// indices. + +// https://github.com/abseil/abseil-cpp/issues/209 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 +// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char +// Work around this by using the portable implementation of Group +// when using -funsigned-char under GCC. +inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { +#if defined(__GNUC__) && !defined(__clang__) + if (std::is_unsigned::value) { + const __m128i mask = _mm_set1_epi8(0x80); + const __m128i diff = _mm_subs_epi8(b, a); + return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); + } +#endif + return _mm_cmpgt_epi8(a, b); +} + +struct GroupSse2Impl { + static constexpr size_t kWidth = 16; // the number of slots per group + + explicit GroupSse2Impl(const ctrl_t* pos) { + ctrl = _mm_loadu_si128(reinterpret_cast(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + BitMask Match(h2_t hash) const { + auto match = _mm_set1_epi8(static_cast(hash)); + BitMask result = BitMask(0); + result = BitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + return result; + } + + // Returns a bitmask representing the positions of empty slots. + NonIterableBitMask MaskEmpty() const { +#ifdef ABSL_INTERNAL_HAVE_SSSE3 + // This only works because ctrl_t::kEmpty is -128. 
+ return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); +#else + auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); +#endif + } + + // Returns a bitmask representing the positions of full slots. + // Note: for `is_small()` tables group may contain the "same" slot twice: + // original and mirrored. + BitMask MaskFull() const { + return BitMask( + static_cast(_mm_movemask_epi8(ctrl) ^ 0xffff)); + } + + // Returns a bitmask representing the positions of empty or deleted slots. + NonIterableBitMask MaskEmptyOrDeleted() const { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return NonIterableBitMask(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); + } + + // Returns the number of trailing empty or deleted elements in the group. + uint32_t CountLeadingEmptyOrDeleted() const { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return TrailingZeros(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + auto msbs = _mm_set1_epi8(static_cast(-128)); + auto x126 = _mm_set1_epi8(126); +#ifdef ABSL_INTERNAL_HAVE_SSSE3 + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); +#else + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); +#endif + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } + + __m128i ctrl; +}; +#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 + +#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) +struct GroupAArch64Impl { + static constexpr size_t kWidth = 8; + + explicit GroupAArch64Impl(const ctrl_t* pos) { + ctrl = vld1_u8(reinterpret_cast(pos)); + } + + BitMask Match(h2_t hash) const { + uint8x8_t dup = vdup_n_u8(hash); + auto mask = vceq_u8(ctrl, dup); + return BitMask( + vget_lane_u64(vreinterpret_u64_u8(mask), 0)); + } + + NonIterableBitMask MaskEmpty() const { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vceq_s8( + vdup_n_s8(static_cast(ctrl_t::kEmpty)), + vreinterpret_s8_u8(ctrl))), + 0); + return NonIterableBitMask(mask); + } + + // Returns a bitmask representing the positions of full slots. + // Note: for `is_small()` tables group may contain the "same" slot twice: + // original and mirrored. + BitMask MaskFull() const { + uint64_t mask = vget_lane_u64( + vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl), + vdup_n_s8(static_cast(0)))), + 0); + return BitMask(mask); + } + + NonIterableBitMask MaskEmptyOrDeleted() const { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( + vdup_n_s8(static_cast(ctrl_t::kSentinel)), + vreinterpret_s8_u8(ctrl))), + 0); + return NonIterableBitMask(mask); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vcle_s8( + vdup_n_s8(static_cast(ctrl_t::kSentinel)), + vreinterpret_s8_u8(ctrl))), + 0); + // Similar to MaskEmptyorDeleted() but we invert the logic to invert the + // produced bitfield. We then count number of trailing zeros. + // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, + // so we should be fine. 
+ return static_cast(countr_zero(mask)) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t slsbs = 0x0202020202020202ULL; + constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL; + auto x = slsbs & (mask >> 6); + auto res = (x + midbs) | msbs; + little_endian::Store64(dst, res); + } + + uint8x8_t ctrl; +}; +#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN + +struct GroupPortableImpl { + static constexpr size_t kWidth = 8; + + explicit GroupPortableImpl(const ctrl_t* pos) + : ctrl(little_endian::Load64(pos)) {} + + BitMask Match(h2_t hash) const { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). + // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask((x - lsbs) & ~x & msbs); + } + + NonIterableBitMask MaskEmpty() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & ~(ctrl << 6)) & + msbs); + } + + // Returns a bitmask representing the positions of full slots. + // Note: for `is_small()` tables group may contain the "same" slot twice: + // original and mirrored. + BitMask MaskFull() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask((ctrl ^ msbs) & msbs); + } + + NonIterableBitMask MaskEmptyOrDeleted() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & ~(ctrl << 7)) & + msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. + constexpr uint64_t bits = 0x0101010101010101ULL; + return static_cast(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> + 3); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; +}; + +#ifdef ABSL_INTERNAL_HAVE_SSE2 +using Group = GroupSse2Impl; +using GroupEmptyOrDeleted = GroupSse2Impl; +#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) +using Group = GroupAArch64Impl; +// For Aarch64, we use the portable implementation for counting and masking +// empty or deleted group elements. This is to avoid the latency of moving +// between data GPRs and Neon registers when it does not provide a benefit. +// Using Neon is profitable when we call Match(), but is not when we don't, +// which is the case when we do *EmptyOrDeleted operations. It is difficult to +// make a similar approach beneficial on other architectures such as x86 since +// they have much lower GPR <-> vector register transfer latency and 16-wide +// Groups. 
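The byte-match bit trick documented inside `GroupPortableImpl::Match` can be checked in isolation before the group aliases are chosen below. This standalone sketch reproduces the worked example from the comment above, including the benign false positive it warns about.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t lsbs = 0x0101010101010101ULL;
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  const uint64_t ctrl = 0x1716151413121110ULL;  // control bytes 0x10..0x17
  const uint64_t hash = 0x12;
  uint64_t x = ctrl ^ (lsbs * hash);  // zeroes the byte where ctrl == hash
  uint64_t match = (x - lsbs) & ~x & msbs;
  std::printf("%016llx\n", static_cast<unsigned long long>(match));
  // Prints 0000000080800000: byte 2 is the true match, and byte 3 is the
  // borrow-induced false positive that the later key comparison weeds out.
}
```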
+using GroupEmptyOrDeleted = GroupPortableImpl; +#else +using Group = GroupPortableImpl; +using GroupEmptyOrDeleted = GroupPortableImpl; +#endif + +// When there is an insertion with no reserved growth, we rehash with +// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a +// constant divided by capacity ensures that inserting N elements is still O(N) +// in the average case. Using the constant 16 means that we expect to rehash ~8 +// times more often than when generations are disabled. We are adding expected +// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 - +// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth. +inline size_t RehashProbabilityConstant() { return 16; } + +class CommonFieldsGenerationInfoEnabled { + // A sentinel value for reserved_growth_ indicating that we just ran out of + // reserved growth on the last insertion. When reserve is called and then + // insertions take place, reserved_growth_'s state machine is N, ..., 1, + // kReservedGrowthJustRanOut, 0. + static constexpr size_t kReservedGrowthJustRanOut = + (std::numeric_limits::max)(); + + public: + CommonFieldsGenerationInfoEnabled() = default; + CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that) + : reserved_growth_(that.reserved_growth_), + reservation_size_(that.reservation_size_), + generation_(that.generation_) { + that.reserved_growth_ = 0; + that.reservation_size_ = 0; + that.generation_ = EmptyGeneration(); + } + CommonFieldsGenerationInfoEnabled& operator=( + CommonFieldsGenerationInfoEnabled&&) = default; + + // Whether we should rehash on insert in order to detect bugs of using invalid + // references. We rehash on the first insertion after reserved_growth_ reaches + // 0 after a call to reserve. We also do a rehash with low probability + // whenever reserved_growth_ is zero. + bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl, + size_t capacity) const; + // Similar to above, except that we don't depend on reserved_growth_. + bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl, + size_t capacity) const; + void maybe_increment_generation_on_insert() { + if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0; + + if (reserved_growth_ > 0) { + if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut; + } else { + increment_generation(); + } + } + void increment_generation() { *generation_ = NextGeneration(*generation_); } + void reset_reserved_growth(size_t reservation, size_t size) { + reserved_growth_ = reservation - size; + } + size_t reserved_growth() const { return reserved_growth_; } + void set_reserved_growth(size_t r) { reserved_growth_ = r; } + size_t reservation_size() const { return reservation_size_; } + void set_reservation_size(size_t r) { reservation_size_ = r; } + GenerationType generation() const { return *generation_; } + void set_generation(GenerationType g) { *generation_ = g; } + GenerationType* generation_ptr() const { return generation_; } + void set_generation_ptr(GenerationType* g) { generation_ = g; } + + private: + // The number of insertions remaining that are guaranteed to not rehash due to + // a prior call to reserve. Note: we store reserved growth in addition to + // reservation size because calls to erase() decrease size_ but don't decrease + // reserved growth. + size_t reserved_growth_ = 0; + // The maximum argument to reserve() since the container was cleared. 
We need + // to keep track of this, in addition to reserved growth, because we reset + // reserved growth to this when erase(begin(), end()) is called. + size_t reservation_size_ = 0; + // Pointer to the generation counter, which is used to validate iterators and + // is stored in the backing array between the control bytes and the slots. + // Note that we can't store the generation inside the container itself and + // keep a pointer to the container in the iterators because iterators must + // remain valid when the container is moved. + // Note: we could derive this pointer from the control pointer, but it makes + // the code more complicated, and there's a benefit in having the sizes of + // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different, + // which is that tests are less likely to rely on the size remaining the same. + GenerationType* generation_ = EmptyGeneration(); +}; + +class CommonFieldsGenerationInfoDisabled { + public: + CommonFieldsGenerationInfoDisabled() = default; + CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) = + default; + CommonFieldsGenerationInfoDisabled& operator=( + CommonFieldsGenerationInfoDisabled&&) = default; + + bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const { + return false; + } + bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const { + return false; + } + void maybe_increment_generation_on_insert() {} + void increment_generation() {} + void reset_reserved_growth(size_t, size_t) {} + size_t reserved_growth() const { return 0; } + void set_reserved_growth(size_t) {} + size_t reservation_size() const { return 0; } + void set_reservation_size(size_t) {} + GenerationType generation() const { return 0; } + void set_generation(GenerationType) {} + GenerationType* generation_ptr() const { return nullptr; } + void set_generation_ptr(GenerationType*) {} +}; + +class HashSetIteratorGenerationInfoEnabled { + public: + HashSetIteratorGenerationInfoEnabled() = default; + explicit HashSetIteratorGenerationInfoEnabled( + const GenerationType* generation_ptr) + : generation_ptr_(generation_ptr), generation_(*generation_ptr) {} + + GenerationType generation() const { return generation_; } + void reset_generation() { generation_ = *generation_ptr_; } + const GenerationType* generation_ptr() const { return generation_ptr_; } + void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; } + + private: + const GenerationType* generation_ptr_ = EmptyGeneration(); + GenerationType generation_ = *generation_ptr_; +}; + +class HashSetIteratorGenerationInfoDisabled { + public: + HashSetIteratorGenerationInfoDisabled() = default; + explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {} + + GenerationType generation() const { return 0; } + void reset_generation() {} + const GenerationType* generation_ptr() const { return nullptr; } + void set_generation_ptr(const GenerationType*) {} +}; + +#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS +using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled; +using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled; +#else +using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled; +using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled; +#endif + +// Returns whether `n` is a valid capacity (i.e., number of slots). +// +// A valid capacity is a non-zero integer `2^m - 1`. 
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } + +// Computes the offset from the start of the backing allocation of control. +// infoz and growth_left are stored at the beginning of the backing array. +inline size_t ControlOffset(bool has_infoz) { + return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t); +} + +// Returns the number of "cloned control bytes". +// +// This is the number of control bytes that are present both at the beginning +// of the control byte array and at the end, such that we can create a +// `Group::kWidth`-width probe window starting from any control byte. +constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } + +// Given the capacity of a table, computes the offset (from the start of the +// backing allocation) of the generation counter (if it exists). +inline size_t GenerationOffset(size_t capacity, bool has_infoz) { + assert(IsValidCapacity(capacity)); + const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); + return ControlOffset(has_infoz) + num_control_bytes; +} + +// Given the capacity of a table, computes the offset (from the start of the +// backing allocation) at which the slots begin. +inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) { + assert(IsValidCapacity(capacity)); + return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() + + slot_align - 1) & + (~slot_align + 1); +} + +// Given the capacity of a table, computes the total size of the backing +// array. +inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align, + bool has_infoz) { + return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size; +} + +// CommonFields hold the fields in raw_hash_set that do not depend +// on template parameters. This allows us to conveniently pass all +// of this state to helper functions as a single argument. +class CommonFields : public CommonFieldsGenerationInfo { + public: + CommonFields() = default; + + // Not copyable + CommonFields(const CommonFields&) = delete; + CommonFields& operator=(const CommonFields&) = delete; + + // Movable + CommonFields(CommonFields&& that) = default; + CommonFields& operator=(CommonFields&&) = default; + + ctrl_t* control() const { return control_; } + void set_control(ctrl_t* c) { control_ = c; } + void* backing_array_start() const { + // growth_left (and maybe infoz) is stored before control bytes. + assert(reinterpret_cast(control()) % alignof(size_t) == 0); + return control() - ControlOffset(has_infoz()); + } + + // Note: we can't use slots() because Qt defines "slots" as a macro. + void* slot_array() const { return slots_; } + void set_slots(void* s) { slots_ = s; } + + // The number of filled slots. + size_t size() const { return size_ >> HasInfozShift(); } + void set_size(size_t s) { + size_ = (s << HasInfozShift()) | (size_ & HasInfozMask()); + } + void increment_size() { + assert(size() < capacity()); + size_ += size_t{1} << HasInfozShift(); + } + void decrement_size() { + assert(size() > 0); + size_ -= size_t{1} << HasInfozShift(); + } + + // The total number of available slots. + size_t capacity() const { return capacity_; } + void set_capacity(size_t c) { + assert(c == 0 || IsValidCapacity(c)); + capacity_ = c; + } + + // The number of slots we can still fill without needing to rehash. + // This is stored in the heap allocation before the control bytes. 
+ size_t growth_left() const { + const size_t* gl_ptr = reinterpret_cast(control()) - 1; + assert(reinterpret_cast(gl_ptr) % alignof(size_t) == 0); + return *gl_ptr; + } + void set_growth_left(size_t gl) { + size_t* gl_ptr = reinterpret_cast(control()) - 1; + assert(reinterpret_cast(gl_ptr) % alignof(size_t) == 0); + *gl_ptr = gl; + } + + bool has_infoz() const { + return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0); + } + void set_has_infoz(bool has_infoz) { + size_ = (size() << HasInfozShift()) | static_cast(has_infoz); + } + + HashtablezInfoHandle infoz() { + return has_infoz() + ? *reinterpret_cast(backing_array_start()) + : HashtablezInfoHandle(); + } + void set_infoz(HashtablezInfoHandle infoz) { + assert(has_infoz()); + *reinterpret_cast(backing_array_start()) = infoz; + } + + bool should_rehash_for_bug_detection_on_insert() const { + return CommonFieldsGenerationInfo:: + should_rehash_for_bug_detection_on_insert(control(), capacity()); + } + bool should_rehash_for_bug_detection_on_move() const { + return CommonFieldsGenerationInfo:: + should_rehash_for_bug_detection_on_move(control(), capacity()); + } + void maybe_increment_generation_on_move() { + if (capacity() == 0) return; + increment_generation(); + } + void reset_reserved_growth(size_t reservation) { + CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size()); + } + + // The size of the backing array allocation. + size_t alloc_size(size_t slot_size, size_t slot_align) const { + return AllocSize(capacity(), slot_size, slot_align, has_infoz()); + } + + // Returns the number of control bytes set to kDeleted. For testing only. + size_t TombstonesCount() const { + return static_cast( + std::count(control(), control() + capacity(), ctrl_t::kDeleted)); + } + + private: + // We store the has_infoz bit in the lowest bit of size_. + static constexpr size_t HasInfozShift() { return 1; } + static constexpr size_t HasInfozMask() { + return (size_t{1} << HasInfozShift()) - 1; + } + + // TODO(b/182800944): Investigate removing some of these fields: + // - control/slots can be derived from each other + + // The control bytes (and, also, a pointer near to the base of the backing + // array). + // + // This contains `capacity + 1 + NumClonedBytes()` entries, even + // when the table is empty (hence EmptyGroup). + // + // Note that growth_left is stored immediately before this pointer. + ctrl_t* control_ = EmptyGroup(); + + // The beginning of the slots, located at `SlotOffset()` bytes after + // `control`. May be null for empty tables. + void* slots_ = nullptr; + + // The number of slots in the backing array. This is always 2^N-1 for an + // integer N. NOTE: we tried experimenting with compressing the capacity and + // storing it together with size_: (a) using 6 bits to store the corresponding + // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of + // size_ and storing size in the low bits. Both of these experiments were + // regressions, presumably because we need capacity to do find operations. + size_t capacity_ = 0; + + // The size and also has one bit that stores whether we have infoz. + size_t size_ = 0; +}; + +template +class raw_hash_set; + +// Returns the next valid capacity after `n`. 
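Before the definition that the comment above introduces, a quick standalone illustration of the capacity ladder and the load-factor arithmetic; the numbers mirror `IsValidCapacity`, `NextCapacity`, and `CapacityToGrowth` as defined in the surrounding code, with 16-wide groups assumed.

```cpp
#include <cassert>
#include <cstddef>

int main() {
  // Valid capacities are 2^m - 1 (1, 3, 7, 15, 31, ...), so `capacity` can
  // double as the probe mask.
  for (size_t c = 1; c <= 31; c = c * 2 + 1) {  // NextCapacity
    assert(((c + 1) & c) == 0 && c > 0);        // IsValidCapacity
  }
  // CapacityToGrowth: capacity - capacity / 8 insertions fit before growth.
  assert(15 - 15 / 8 == 14);  // a capacity-15 table grows after 14 elements
  assert(63 - 63 / 8 == 56);  // a capacity-63 table grows after 56 elements
}
```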
+inline size_t NextCapacity(size_t n) { + assert(IsValidCapacity(n) || n == 0); + return n * 2 + 1; +} + +// Applies the following mapping to every byte in the control array: +// * kDeleted -> kEmpty +// * kEmpty -> kEmpty +// * _ -> kDeleted +// PRECONDITION: +// IsValidCapacity(capacity) +// ctrl[capacity] == ctrl_t::kSentinel +// ctrl[i] != ctrl_t::kSentinel for all i < capacity +void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); + +// Converts `n` into the next valid capacity, per `IsValidCapacity`. +inline size_t NormalizeCapacity(size_t n) { + return n ? ~size_t{} >> countl_zero(n) : 1; +} + +// General notes on capacity/growth methods below: +// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an +// average of two empty slots per group. +// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. +// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we +// never need to probe (the whole table fits in one group) so we don't need a +// load factor less than 1. + +// Given `capacity`, applies the load factor; i.e., it returns the maximum +// number of values we should put into the table before a resizing rehash. +inline size_t CapacityToGrowth(size_t capacity) { + assert(IsValidCapacity(capacity)); + // `capacity*7/8` + if (Group::kWidth == 8 && capacity == 7) { + // x-x/8 does not work when x==7. + return 6; + } + return capacity - capacity / 8; +} + +// Given `growth`, "unapplies" the load factor to find how large the capacity +// should be to stay within the load factor. +// +// This might not be a valid capacity and `NormalizeCapacity()` should be +// called on this. +inline size_t GrowthToLowerboundCapacity(size_t growth) { + // `growth*8/7` + if (Group::kWidth == 8 && growth == 7) { + // x+(x-1)/7 does not work when x==7. + return 8; + } + return growth + static_cast((static_cast(growth) - 1) / 7); +} + +template +size_t SelectBucketCountForIterRange(InputIter first, InputIter last, + size_t bucket_count) { + if (bucket_count != 0) { + return bucket_count; + } + using InputIterCategory = + typename std::iterator_traits::iterator_category; + if (std::is_base_of::value) { + return GrowthToLowerboundCapacity( + static_cast(std::distance(first, last))); + } + return 0; +} + +constexpr bool SwisstableDebugEnabled() { +#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \ + ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG) + return true; +#else + return false; +#endif +} + +inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation, + const GenerationType* generation_ptr, + const char* operation) { + if (!SwisstableDebugEnabled()) return; + // `SwisstableDebugEnabled()` is also true for release builds with hardening + // enabled. To minimize their impact in those builds: + // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout + // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve + // the chances that the hot paths will be inlined. + if (ABSL_PREDICT_FALSE(ctrl == nullptr)) { + ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation); + } + if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) { + ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.", + operation); + } + if (SwisstableGenerationsEnabled()) { + if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { + ABSL_RAW_LOG(FATAL, + "%s called on invalid iterator. 
The table could have " + "rehashed or moved since this iterator was initialized.", + operation); + } + if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) { + ABSL_RAW_LOG( + FATAL, + "%s called on invalid iterator. The element was likely erased.", + operation); + } + } else { + if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) { + ABSL_RAW_LOG( + FATAL, + "%s called on invalid iterator. The element might have been erased " + "or the table might have rehashed. Consider running with " + "--config=asan to diagnose rehashing issues.", + operation); + } + } +} + +// Note that for comparisons, null/end iterators are valid. +inline void AssertIsValidForComparison(const ctrl_t* ctrl, + GenerationType generation, + const GenerationType* generation_ptr) { + if (!SwisstableDebugEnabled()) return; + const bool ctrl_is_valid_for_comparison = + ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl); + if (SwisstableGenerationsEnabled()) { + if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. The table could have rehashed " + "or moved since this iterator was initialized."); + } + if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) { + ABSL_RAW_LOG( + FATAL, "Invalid iterator comparison. The element was likely erased."); + } + } else { + ABSL_HARDENING_ASSERT( + ctrl_is_valid_for_comparison && + "Invalid iterator comparison. The element might have been erased or " + "the table might have rehashed. Consider running with --config=asan to " + "diagnose rehashing issues."); + } +} + +// If the two iterators come from the same container, then their pointers will +// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice/versa. +// Note: we take slots by reference so that it's not UB if they're uninitialized +// as long as we don't read them (when ctrl is null). +inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a, + const ctrl_t* ctrl_b, + const void* const& slot_a, + const void* const& slot_b) { + // If either control byte is null, then we can't tell. + if (ctrl_a == nullptr || ctrl_b == nullptr) return true; + const void* low_slot = slot_a; + const void* hi_slot = slot_b; + if (ctrl_a > ctrl_b) { + std::swap(ctrl_a, ctrl_b); + std::swap(low_slot, hi_slot); + } + return ctrl_b < low_slot && low_slot <= hi_slot; +} + +// Asserts that two iterators come from the same container. +// Note: we take slots by reference so that it's not UB if they're uninitialized +// as long as we don't read them (when ctrl is null). +inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, + const void* const& slot_a, + const void* const& slot_b, + const GenerationType* generation_ptr_a, + const GenerationType* generation_ptr_b) { + if (!SwisstableDebugEnabled()) return; + // `SwisstableDebugEnabled()` is also true for release builds with hardening + // enabled. To minimize their impact in those builds: + // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout + // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve + // the chances that the hot paths will be inlined. + const bool a_is_default = ctrl_a == EmptyGroup(); + const bool b_is_default = ctrl_b == EmptyGroup(); + if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) { + ABSL_RAW_LOG( + FATAL, + "Invalid iterator comparison. 
Comparing default-constructed iterator " + "with non-default-constructed iterator."); + } + if (a_is_default && b_is_default) return; + + if (SwisstableGenerationsEnabled()) { + if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return; + const bool a_is_empty = IsEmptyGeneration(generation_ptr_a); + const bool b_is_empty = IsEmptyGeneration(generation_ptr_b); + if (a_is_empty != b_is_empty) { + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing iterator from a " + "non-empty hashtable with an iterator from an empty " + "hashtable."); + } + if (a_is_empty && b_is_empty) { + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing iterators from " + "different empty hashtables."); + } + const bool a_is_end = ctrl_a == nullptr; + const bool b_is_end = ctrl_b == nullptr; + if (a_is_end || b_is_end) { + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing iterator with an " + "end() iterator from a different hashtable."); + } + ABSL_RAW_LOG(FATAL, + "Invalid iterator comparison. Comparing non-end() iterators " + "from different hashtables."); + } else { + ABSL_HARDENING_ASSERT( + AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) && + "Invalid iterator comparison. The iterators may be from different " + "containers or the container might have rehashed or moved. Consider " + "running with --config=asan to diagnose issues."); + } +} + +struct FindInfo { + size_t offset; + size_t probe_length; +}; + +// Whether a table is "small". A small table fits entirely into a probing +// group, i.e., has a capacity < `Group::kWidth`. +// +// In small mode we are able to use the whole capacity. The extra control +// bytes give us at least one "empty" control byte to stop the iteration. +// This is important to make 1 a valid capacity. +// +// In small mode only the first `capacity` control bytes after the sentinel +// are valid. The rest contain dummy ctrl_t::kEmpty values that do not +// represent a real slot. This is important to take into account on +// `find_first_non_full()`, where we never try +// `ShouldInsertBackwards()` for small tables. +inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } + +// Whether a table fits entirely into a probing group. +// Arbitrary order of elements in such tables is correct. +inline bool is_single_group(size_t capacity) { + return capacity <= Group::kWidth; +} + +// Begins a probing operation on `common.control`, using `hash`. +inline probe_seq probe(const ctrl_t* ctrl, const size_t capacity, + size_t hash) { + return probe_seq(H1(hash, ctrl), capacity); +} +inline probe_seq probe(const CommonFields& common, size_t hash) { + return probe(common.control(), common.capacity(), hash); +} + +// Probes an array of control bits using a probe sequence derived from `hash`, +// and returns the offset corresponding to the first deleted or empty slot. +// +// Behavior when the entire table is full is undefined. +// +// NOTE: this function must work with tables having both empty and deleted +// slots in the same group. Such tables appear during `erase()`. +template +inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) { + auto seq = probe(common, hash); + const ctrl_t* ctrl = common.control(); + while (true) { + GroupEmptyOrDeleted g{ctrl + seq.offset()}; + auto mask = g.MaskEmptyOrDeleted(); + if (mask) { +#if !defined(NDEBUG) + // We want to add entropy even when ASLR is not enabled. + // In debug build we will randomly insert in either the front or back of + // the group. 
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing + if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) { + return {seq.offset(mask.HighestBitSet()), seq.index()}; + } +#endif + return {seq.offset(mask.LowestBitSet()), seq.index()}; + } + seq.next(); + assert(seq.index() <= common.capacity() && "full table!"); + } +} + +// Extern template for inline function keep possibility of inlining. +// When compiler decided to not inline, no symbols will be added to the +// corresponding translation unit. +extern template FindInfo find_first_non_full(const CommonFields&, size_t); + +// Non-inlined version of find_first_non_full for use in less +// performance critical routines. +FindInfo find_first_non_full_outofline(const CommonFields&, size_t); + +inline void ResetGrowthLeft(CommonFields& common) { + common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size()); +} + +// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire +// array as marked as empty. +inline void ResetCtrl(CommonFields& common, size_t slot_size) { + const size_t capacity = common.capacity(); + ctrl_t* ctrl = common.control(); + std::memset(ctrl, static_cast(ctrl_t::kEmpty), + capacity + 1 + NumClonedBytes()); + ctrl[capacity] = ctrl_t::kSentinel; + SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity); +} + +// Sets `ctrl[i]` to `h`. +// +// Unlike setting it directly, this function will perform bounds checks and +// mirror the value to the cloned tail if necessary. +inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h, + size_t slot_size) { + const size_t capacity = common.capacity(); + assert(i < capacity); + + auto* slot_i = static_cast(common.slot_array()) + i * slot_size; + if (IsFull(h)) { + SanitizerUnpoisonMemoryRegion(slot_i, slot_size); + } else { + SanitizerPoisonMemoryRegion(slot_i, slot_size); + } + + ctrl_t* ctrl = common.control(); + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; +} + +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. +inline void SetCtrl(const CommonFields& common, size_t i, h2_t h, + size_t slot_size) { + SetCtrl(common, i, static_cast(h), slot_size); +} + +// growth_left (which is a size_t) is stored with the backing array. +constexpr size_t BackingArrayAlignment(size_t align_of_slot) { + return (std::max)(align_of_slot, alignof(size_t)); +} + +// Returns the address of the ith slot in slots where each slot occupies +// slot_size. +inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) { + return reinterpret_cast(reinterpret_cast(slot_array) + + (slot * slot_size)); +} + +// Helper class to perform resize of the hash set. +// +// It contains special optimizations for small group resizes. +// See GrowIntoSingleGroupShuffleControlBytes for details. +class HashSetResizeHelper { + public: + explicit HashSetResizeHelper(CommonFields& c) + : old_ctrl_(c.control()), + old_capacity_(c.capacity()), + had_infoz_(c.has_infoz()) {} + + // Optimized for small groups version of `find_first_non_full` applicable + // only right after calling `raw_hash_set::resize`. + // It has implicit assumption that `resize` will call + // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`. + // Falls back to `find_first_non_full` in case of big groups, so it is + // safe to use after `rehash_and_grow_if_necessary`. 
+ static FindInfo FindFirstNonFullAfterResize(const CommonFields& c, + size_t old_capacity, + size_t hash) { + if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) { + return find_first_non_full(c, hash); + } + // Find a location for the new element non-deterministically. + // Note that any position is correct. + // It will located at `half_old_capacity` or one of the other + // empty slots with approximately 50% probability each. + size_t offset = probe(c, hash).offset(); + + // Note that we intentionally use unsigned int underflow. + if (offset - (old_capacity + 1) >= old_capacity) { + // Offset fall on kSentinel or into the mostly occupied first half. + offset = old_capacity / 2; + } + assert(IsEmpty(c.control()[offset])); + return FindInfo{offset, 0}; + } + + ctrl_t* old_ctrl() const { return old_ctrl_; } + size_t old_capacity() const { return old_capacity_; } + + // Allocates a backing array for the hashtable. + // Reads `capacity` and updates all other fields based on the result of + // the allocation. + // + // It also may do the folowing actions: + // 1. initialize control bytes + // 2. initialize slots + // 3. deallocate old slots. + // + // We are bundling a lot of functionality + // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code + // duplication in raw_hash_set<>::resize. + // + // `c.capacity()` must be nonzero. + // POSTCONDITIONS: + // 1. CommonFields is initialized. + // + // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy + // Both control bytes and slots are fully initialized. + // old_slots are deallocated. + // infoz.RecordRehash is called. + // + // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy + // Control bytes are fully initialized. + // infoz.RecordRehash is called. + // GrowSizeIntoSingleGroup must be called to finish slots initialization. + // + // if !IsGrowingIntoSingleGroupApplicable + // Control bytes are initialized to empty table via ResetCtrl. + // raw_hash_set<>::resize must insert elements regularly. + // infoz.RecordRehash is called if old_capacity == 0. + // + // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation. + template + ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots, + Alloc alloc) { + assert(c.capacity()); + // Folks with custom allocators often make unwarranted assumptions about the + // behavior of their classes vis-a-vis trivial destructability and what + // calls they will or won't make. Avoid sampling for people with custom + // allocators to get us out of this mess. This is not a hard guarantee but + // a workaround while we plan the exact guarantee we want to provide. + const size_t sample_size = + (std::is_same>::value && + c.slot_array() == nullptr) + ? SizeOfSlot + : 0; + HashtablezInfoHandle infoz = + sample_size > 0 ? 
Sample(sample_size) : c.infoz(); + + const bool has_infoz = infoz.IsSampled(); + const size_t cap = c.capacity(); + const size_t alloc_size = + AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz); + char* mem = static_cast( + Allocate(&alloc, alloc_size)); + const GenerationType old_generation = c.generation(); + c.set_generation_ptr(reinterpret_cast( + mem + GenerationOffset(cap, has_infoz))); + c.set_generation(NextGeneration(old_generation)); + c.set_control(reinterpret_cast(mem + ControlOffset(has_infoz))); + c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz)); + ResetGrowthLeft(c); + + const bool grow_single_group = + IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()); + if (old_capacity_ != 0 && grow_single_group) { + if (TransferUsesMemcpy) { + GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot); + DeallocateOld(alloc, SizeOfSlot, old_slots); + } else { + GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity()); + } + } else { + ResetCtrl(c, SizeOfSlot); + } + + c.set_has_infoz(has_infoz); + if (has_infoz) { + infoz.RecordStorageChanged(c.size(), cap); + if (grow_single_group || old_capacity_ == 0) { + infoz.RecordRehash(0); + } + c.set_infoz(infoz); + } + return grow_single_group; + } + + // Relocates slots into new single group consistent with + // GrowIntoSingleGroupShuffleControlBytes. + // + // PRECONDITIONS: + // 1. GrowIntoSingleGroupShuffleControlBytes was already called. + template + void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref, + typename PolicyTraits::slot_type* old_slots) { + assert(old_capacity_ < Group::kWidth / 2); + assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity())); + using slot_type = typename PolicyTraits::slot_type; + assert(is_single_group(c.capacity())); + + auto* new_slots = reinterpret_cast(c.slot_array()); + + size_t shuffle_bit = old_capacity_ / 2 + 1; + for (size_t i = 0; i < old_capacity_; ++i) { + if (IsFull(old_ctrl_[i])) { + size_t new_i = i ^ shuffle_bit; + SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type)); + PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i); + } + } + PoisonSingleGroupEmptySlots(c, sizeof(slot_type)); + } + + // Deallocates old backing array. + template + void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) { + SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_); + Deallocate( + &alloc_ref, old_ctrl_ - ControlOffset(had_infoz_), + AllocSize(old_capacity_, slot_size, AlignOfSlot, had_infoz_)); + } + + private: + // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing. + static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity, + size_t new_capacity) { + // NOTE that `old_capacity < new_capacity` in order to have + // `old_capacity < Group::kWidth / 2` to make faster copies of 8 bytes. + return is_single_group(new_capacity) && old_capacity < new_capacity; + } + + // Relocates control bytes and slots into new single group for + // transferable objects. + // Must be called only if IsGrowingIntoSingleGroupApplicable returned true. + void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots, + size_t slot_size); + + // Shuffle control bits deterministically to the next capacity. + // Returns offset for newly added element with given hash. + // + // PRECONDITIONs: + // 1. new_ctrl is allocated for new_capacity, + // but not initialized. + // 2. new_capacity is a single group. 
+ // + // All elements are transferred into the first `old_capacity + 1` positions + // of the new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions + // in order to change an order and keep it non deterministic. + // Although rotation itself deterministic, position of the new added element + // will be based on `H1` and is not deterministic. + // + // Examples: + // S = kSentinel, E = kEmpty + // + // old_ctrl = SEEEEEEEE... + // new_ctrl = ESEEEEEEE... + // + // old_ctrl = 0SEEEEEEE... + // new_ctrl = E0ESE0EEE... + // + // old_ctrl = 012S012EEEEEEEEE... + // new_ctrl = 2E01EEES2E01EEE... + // + // old_ctrl = 0123456S0123456EEEEEEEEEEE... + // new_ctrl = 456E0123EEEEEES456E0123EEE... + void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl, + size_t new_capacity) const; + + // Shuffle trivially transferable slots in the way consistent with + // GrowIntoSingleGroupShuffleControlBytes. + // + // PRECONDITIONs: + // 1. old_capacity must be non-zero. + // 2. new_ctrl is fully initialized using + // GrowIntoSingleGroupShuffleControlBytes. + // 3. new_slots is allocated and *not* poisoned. + // + // POSTCONDITIONS: + // 1. new_slots are transferred from old_slots_ consistent with + // GrowIntoSingleGroupShuffleControlBytes. + // 2. Empty new_slots are *not* poisoned. + void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots, + void* new_slots, + size_t slot_size) const; + + // Poison empty slots that were transferred using the deterministic algorithm + // described above. + // PRECONDITIONs: + // 1. new_ctrl is fully initialized using + // GrowIntoSingleGroupShuffleControlBytes. + // 2. new_slots is fully initialized consistent with + // GrowIntoSingleGroupShuffleControlBytes. + void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const { + // poison non full items + for (size_t i = 0; i < c.capacity(); ++i) { + if (!IsFull(c.control()[i])) { + SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size), + slot_size); + } + } + } + + ctrl_t* old_ctrl_; + size_t old_capacity_; + bool had_infoz_; +}; + +// PolicyFunctions bundles together some information for a particular +// raw_hash_set instantiation. This information is passed to +// type-erased functions that want to do small amounts of type-specific +// work. +struct PolicyFunctions { + size_t slot_size; + + // Returns the hash of the pointed-to slot. + size_t (*hash_slot)(void* set, void* slot); + + // Transfer the contents of src_slot to dst_slot. + void (*transfer)(void* set, void* dst_slot, void* src_slot); + + // Deallocate the backing store from common. + void (*dealloc)(CommonFields& common, const PolicyFunctions& policy); +}; + +// ClearBackingArray clears the backing array, either modifying it in place, +// or creating a new one based on the value of "reuse". +// REQUIRES: c.capacity > 0 +void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, + bool reuse); + +// Type-erased version of raw_hash_set::erase_meta_only. +void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size); + +// Function to place in PolicyFunctions::dealloc for raw_hash_sets +// that are using std::allocator. This allows us to share the same +// function body for raw_hash_set instantiations that have the +// same slot alignment. +template +ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common, + const PolicyFunctions& policy) { + // Unpoison before returning the memory to the allocator. 
+  SanitizerUnpoisonMemoryRegion(common.slot_array(),
+                                policy.slot_size * common.capacity());
+
+  std::allocator<char> alloc;
+  common.infoz().Unregister();
+  Deallocate<BackingArrayAlignment(AlignOfSlot)>(
+      &alloc, common.backing_array_start(),
+      common.alloc_size(policy.slot_size, AlignOfSlot));
+}
+
+// For trivially relocatable types we use memcpy directly. This allows us to
+// share the same function body for raw_hash_set instantiations that have the
+// same slot size as long as they are relocatable.
+template <size_t SizeOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
+  memcpy(dst, src, SizeOfSlot);
+}
+
+// Type-erased version of raw_hash_set::drop_deletes_without_resize.
+void DropDeletesWithoutResize(CommonFields& common,
+                              const PolicyFunctions& policy, void* tmp_space);
+
+// A SwissTable.
+//
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable.
+// The functor should accept a key and return size_t as hash. For best
+// performance it is important that the hash function provides high entropy
+// across all bits of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality.
+// It should accept two (of possibly different type) keys and return a bool:
+// true if they are equal, false if they are not. If two keys compare equal,
+// then their hash values as defined by Hash MUST be equal.
+//
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+  using PolicyTraits = hash_policy_traits<Policy>;
+  using KeyArgImpl =
+      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+  using init_type = typename PolicyTraits::init_type;
+  using key_type = typename PolicyTraits::key_type;
+  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs
+  // user code fixes!
+  using slot_type = typename PolicyTraits::slot_type;
+  using allocator_type = Alloc;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+  using hasher = Hash;
+  using key_equal = Eq;
+  using policy_type = Policy;
+  using value_type = typename PolicyTraits::value_type;
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::pointer;
+  using const_pointer = typename absl::allocator_traits<
+      allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+  // Alias used for heterogeneous lookup functions.
+  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+  // `key_type` otherwise. It permits template argument deduction on `K` for
+  // the transparent case.
+  template <class K>
+  using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
+  // Give an early error when key_type is not hashable/eq.
+  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
+  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
+
+  using AllocTraits = absl::allocator_traits<allocator_type>;
+  using SlotAlloc = typename absl::allocator_traits<
+      allocator_type>::template rebind_alloc<slot_type>;
+  // People are often sloppy with the exact type of their allocator (sometimes
+  // it has an extra const or is missing the pair, but rebinds made it work
+  // anyway).
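+  // (Editorial sketch, not upstream code.) Rebinding is what makes such
+  // "sloppy" allocator types work: whatever value_type the user wrote, it is
+  // rebound to what the table actually allocates. For example, a user might
+  // write std::allocator<std::pair<int, int>> for a map whose value_type is
+  // std::pair<const int, int>; rebinding repairs it:
+  //
+  //   using Fixed = typename std::allocator_traits<std::allocator<
+  //       std::pair<int, int>>>::template rebind_alloc<char>;
+  //   static_assert(std::is_same<Fixed, std::allocator<char>>::value, "");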
+ using CharAlloc = + typename absl::allocator_traits::template rebind_alloc; + using SlotAllocTraits = typename absl::allocator_traits< + allocator_type>::template rebind_traits; + + static_assert(std::is_lvalue_reference::value, + "Policy::element() must return a reference"); + + template + struct SameAsElementReference + : std::is_same::type>::type, + typename std::remove_cv< + typename std::remove_reference::type>::type> {}; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template + using RequiresInsertable = typename std::enable_if< + absl::disjunction, + SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. + template + using RequiresNotInit = + typename std::enable_if::value, int>::type; + + template + using IsDecomposable = IsDecomposable; + + public: + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + + class iterator : private HashSetIteratorGenerationInfo { + friend class raw_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename raw_hash_set::value_type; + using reference = + absl::conditional_t; + using pointer = absl::remove_reference_t*; + using difference_type = typename raw_hash_set::difference_type; + + iterator() {} + + // PRECONDITION: not an end() iterator. + reference operator*() const { + AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()"); + return unchecked_deref(); + } + + // PRECONDITION: not an end() iterator. + pointer operator->() const { + AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->"); + return &operator*(); + } + + // PRECONDITION: not an end() iterator. + iterator& operator++() { + AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++"); + ++ctrl_; + ++slot_; + skip_empty_or_deleted(); + return *this; + } + // PRECONDITION: not an end() iterator. + iterator operator++(int) { + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) { + AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr()); + AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr()); + AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_, + a.generation_ptr(), b.generation_ptr()); + return a.ctrl_ == b.ctrl_; + } + friend bool operator!=(const iterator& a, const iterator& b) { + return !(a == b); + } + + private: + iterator(ctrl_t* ctrl, slot_type* slot, + const GenerationType* generation_ptr) + : HashSetIteratorGenerationInfo(generation_ptr), + ctrl_(ctrl), + slot_(slot) { + // This assumption helps the compiler know that any non-end iterator is + // not equal to any end iterator. + ABSL_ASSUME(ctrl != nullptr); + } + // For end() iterators. + explicit iterator(const GenerationType* generation_ptr) + : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {} + + // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until + // they reach one. + // + // If a sentinel is reached, we null `ctrl_` out instead. 
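+  // (Editorial sketch, not upstream code.) A scalar equivalent of the
+  // group-wise skip implemented below, to show the intent:
+  //
+  //   while (IsEmptyOrDeleted(*ctrl_)) { ++ctrl_; ++slot_; }
+  //   if (*ctrl_ == ctrl_t::kSentinel) ctrl_ = nullptr;
+  //
+  // The real implementation advances by whole groups using
+  // CountLeadingEmptyOrDeleted() rather than one control byte at a time.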
+ void skip_empty_or_deleted() { + while (IsEmptyOrDeleted(*ctrl_)) { + uint32_t shift = + GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted(); + ctrl_ += shift; + slot_ += shift; + } + if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; + } + + ctrl_t* control() const { return ctrl_; } + slot_type* slot() const { return slot_; } + + // We use EmptyGroup() for default-constructed iterators so that they can + // be distinguished from end iterators, which have nullptr ctrl_. + ctrl_t* ctrl_ = EmptyGroup(); + // To avoid uninitialized member warnings, put slot_ in an anonymous union. + // The member is not initialized on singleton and end iterators. + union { + slot_type* slot_; + }; + + // An equality check which skips ABSL Hardening iterator invalidation + // checks. + // Should be used when the lifetimes of the iterators are well-enough + // understood to prove that they cannot be invalid. + bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); } + + // Dereferences the iterator without ABSL Hardening iterator invalidation + // checks. + reference unchecked_deref() const { return PolicyTraits::element(slot_); } + }; + + class const_iterator { + friend class raw_hash_set; + template + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename raw_hash_set::value_type; + using reference = typename raw_hash_set::const_reference; + using pointer = typename raw_hash_set::const_pointer; + using difference_type = typename raw_hash_set::difference_type; + + const_iterator() = default; + // Implicit construction from iterator. + const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT + + reference operator*() const { return *inner_; } + pointer operator->() const { return inner_.operator->(); } + + const_iterator& operator++() { + ++inner_; + return *this; + } + const_iterator operator++(int) { return inner_++; } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot, + const GenerationType* gen) + : inner_(const_cast(ctrl), const_cast(slot), gen) { + } + ctrl_t* control() const { return inner_.control(); } + slot_type* slot() const { return inner_.slot(); } + + iterator inner_; + + bool unchecked_equals(const const_iterator& b) { + return inner_.unchecked_equals(b.inner_); + } + }; + + using node_type = node_handle, Alloc>; + using insert_return_type = InsertReturnType; + + // Note: can't use `= default` due to non-default noexcept (causes + // problems for some compilers). 
NOLINTNEXTLINE + raw_hash_set() noexcept( + std::is_nothrow_default_constructible::value && + std::is_nothrow_default_constructible::value && + std::is_nothrow_default_constructible::value) {} + + ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set( + size_t bucket_count, const hasher& hash = hasher(), + const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : settings_(CommonFields{}, hash, eq, alloc) { + if (bucket_count) { + resize(NormalizeCapacity(bucket_count)); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, + const allocator_type& alloc) + : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) + : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} + + explicit raw_hash_set(const allocator_type& alloc) + : raw_hash_set(0, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), + hash, eq, alloc) { + insert(first, last); + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) + : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // absl::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // absl::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. 
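+  // (Editorial usage sketch, not upstream code; assumes the public
+  // flat_hash_set wrapper over this class.) Both of the following compile,
+  // picking the T- and init_type-based overloads declared below:
+  //
+  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
+  //   absl::flat_hash_set<std::string> t({"ghi"}, /*bucket_count=*/16);
+  //
+  // Per the note above, the braced strings need not be materialized as an
+  // intermediate std::initializer_list<std::string>.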
+ template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, + const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(const raw_hash_set& that) + : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( + that.alloc_ref())) {} + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) + : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { + const size_t size = that.size(); + if (size == 0) return; + reserve(size); + // Because the table is guaranteed to be empty, we can do something faster + // than a full `insert`. + for (const auto& v : that) { + const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); + auto target = find_first_non_full_outofline(common(), hash); + SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); + emplace_at(target.offset, v); + common().maybe_increment_generation_on_insert(); + infoz().RecordInsert(hash, target.probe_length); + } + common().set_size(size); + set_growth_left(growth_left() - size); + } + + ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept( + std::is_nothrow_copy_constructible::value && + std::is_nothrow_copy_constructible::value && + std::is_nothrow_copy_constructible::value) + : // Hash, equality and allocator are copied instead of moved because + // `that` must be left valid. If Hash is std::function, moving it + // would create a nullptr functor that cannot be called. + // TODO(b/296061262): move instead of copying hash/eq/alloc. + // Note: we avoid using exchange for better generated code. 
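+        // (Editorial illustration, not upstream code; P/E/A are hypothetical
+        // policy/equality/allocator parameters.) The std::function hazard
+        // mentioned above:
+        //
+        //   using Set = raw_hash_set<P, std::function<size_t(int)>, E, A>;
+        //   Set t2(std::move(t1));
+        //   // Had the hasher been moved rather than copied, t1's hasher
+        //   // would be empty and t1.insert(42) would throw
+        //   // std::bad_function_call.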
+ settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(), + that.alloc_ref()) { + that.common() = CommonFields{}; + maybe_increment_generation_or_rehash_on_move(); + } + + raw_hash_set(raw_hash_set&& that, const allocator_type& a) + : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) { + if (a == that.alloc_ref()) { + std::swap(common(), that.common()); + maybe_increment_generation_or_rehash_on_move(); + } else { + move_elements_allocs_unequal(std::move(that)); + } + } + + raw_hash_set& operator=(const raw_hash_set& that) { + if (ABSL_PREDICT_FALSE(this == &that)) return *this; + constexpr bool propagate_alloc = + AllocTraits::propagate_on_container_copy_assignment::value; + // TODO(ezb): maybe avoid allocating a new backing array if this->capacity() + // is an exact match for that.size(). If this->capacity() is too big, then + // it would make iteration very slow to reuse the allocation. Maybe we can + // do the same heuristic as clear() and reuse if it's small enough. + raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref()); + // NOLINTNEXTLINE: not returning *this for performance. + return assign_impl(std::move(tmp)); + } + + raw_hash_set& operator=(raw_hash_set&& that) noexcept( + absl::allocator_traits::is_always_equal::value && + std::is_nothrow_move_assignable::value && + std::is_nothrow_move_assignable::value) { + // TODO(sbenza): We should only use the operations from the noexcept clause + // to make sure we actually adhere to that contract. + // NOLINTNEXTLINE: not returning *this for performance. + return move_assign( + std::move(that), + typename AllocTraits::propagate_on_container_move_assignment()); + } + + ~raw_hash_set() { destructor_impl(); } + + iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto it = iterator_at(0); + it.skip_empty_or_deleted(); + return it; + } + iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { + return iterator(common().generation_ptr()); + } + + const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return const_cast(this)->begin(); + } + const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return iterator(common().generation_ptr()); + } + const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return begin(); + } + const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); } + + bool empty() const { return !size(); } + size_t size() const { return common().size(); } + size_t capacity() const { return common().capacity(); } + size_t max_size() const { return (std::numeric_limits::max)(); } + + ABSL_ATTRIBUTE_REINITIALIZES void clear() { + // Iterating over this container is O(bucket_count()). When bucket_count() + // is much greater than size(), iteration becomes prohibitively expensive. + // For clear() it is more important to reuse the allocated array when the + // container is small because allocation takes comparatively long time + // compared to destruction of the elements of the container. So we pick the + // largest bucket_count() threshold for which iteration is still fast and + // past that we simply deallocate the array. + const size_t cap = capacity(); + if (cap == 0) { + // Already guaranteed to be empty; so nothing to do. + } else { + destroy_slots(); + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128); + } + common().set_reserved_growth(0); + common().set_reservation_size(0); + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable type other than init_type. 
+ // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, class T2 = T, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + std::pair insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. + // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. + // + // flat_hash_map s; + // s.insert({"abc", 42}); + std::pair insert(init_type&& value) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return emplace(std::move(value)); + } + + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, class T2 = T, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert(std::forward(value)).first; + } + + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, + const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert(value).first; + } + + iterator insert(const_iterator, + init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + for (; first != last; ++first) emplace(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!node) return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); + auto res = PolicyTraits::apply( + InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, + elem); + if (res.second) { + CommonAccess::Reset(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator, + node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto res = insert(std::move(node)); + node = std::move(res.node); + return res.position; + } + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return PolicyTraits::apply(EmplaceDecomposable{*this}, + std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + template ::value, int>::type = 0> + std::pair emplace(Args&&... 
args) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + alignas(slot_type) unsigned char raw[sizeof(slot_type)]; + slot_type* slot = reinterpret_cast(&raw); + + construct(slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); + } + + template + iterator emplace_hint(const_iterator, + Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return emplace(std::forward(args)...).first; + } + + // Extension API: support for lazy emplace. + // + // Looks up key in the table. If found, returns the iterator to the element. + // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`, + // and returns an iterator to the new element. + // + // `f` must abide by several restrictions: + // - it MUST call `raw_hash_set::constructor` with arguments as if a + // `raw_hash_set::value_type` is constructed, + // - it MUST NOT access the container before the call to + // `raw_hash_set::constructor`, and + // - it MUST NOT erase the lazily emplaced element. + // Doing any of these is undefined behavior. + // + // For example: + // + // std::unordered_set s; + // // Makes ArenaStr even if "abc" is in the map. + // s.insert(ArenaString(&arena, "abc")); + // + // flat_hash_set s; + // // Makes ArenaStr only if "abc" is not in the map. + // s.lazy_emplace("abc", [&](const constructor& ctor) { + // ctor(&arena, "abc"); + // }); + // + // WARNING: This API is currently experimental. If there is a way to implement + // the same thing with the rest of the API, prefer that. + class constructor { + friend class raw_hash_set; + + public: + template + void operator()(Args&&... args) const { + assert(*slot_); + PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); + *slot_ = nullptr; + } + + private: + constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} + + allocator_type* alloc_; + slot_type** slot_; + }; + + template + iterator lazy_emplace(const key_arg& key, + F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto res = find_or_prepare_insert(key); + if (res.second) { + slot_type* slot = slot_array() + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + template + size_type erase(const key_arg& key) { + auto it = find(key); + if (it == end()) return 0; + erase(it); + return 1; + } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). The + // iterator is invalidated, so any increment should be done before calling + // erase. In order to erase while iterating across a map, use the following + // idiom (which also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // // `erase()` will invalidate `it`, so advance `it` first. + // auto copy_it = it++; + // if () { + // m.erase(copy_it); + // } + // } + void erase(const_iterator cit) { erase(cit.inner_); } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. 
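+  // (Editorial illustration, not upstream code; the predicate inside `if`
+  // was elided in the idiom above.) A concrete instance:
+  //
+  //   absl::flat_hash_map<int, int> m = ...;
+  //   for (auto it = m.begin(), end = m.end(); it != end;) {
+  //     auto copy_it = it++;  // erase() invalidates copy_it; advance first.
+  //     if (copy_it->second == 0) m.erase(copy_it);
+  //   }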
+ void erase(iterator it) { + AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()"); + destroy(it.slot()); + erase_meta_only(it); + } + + iterator erase(const_iterator first, + const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND { + // We check for empty first because ClearBackingArray requires that + // capacity() > 0 as a precondition. + if (empty()) return end(); + if (first == begin() && last == end()) { + // TODO(ezb): we access control bytes in destroy_slots so it could make + // sense to combine destroy_slots and ClearBackingArray to avoid cache + // misses when the table is large. Note that we also do this in clear(). + destroy_slots(); + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true); + common().set_reserved_growth(common().reservation_size()); + return end(); + } + while (first != last) { + erase(first++); + } + return last.inner_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. + template + void merge(raw_hash_set& src) { // NOLINT + assert(this != &src); + for (auto it = src.begin(), e = src.end(); it != e;) { + auto next = std::next(it); + if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot())}, + PolicyTraits::element(it.slot())) + .second) { + src.erase_meta_only(it); + } + it = next; + } + } + + template + void merge(raw_hash_set&& src) { + merge(src); + } + + node_type extract(const_iterator position) { + AssertIsFull(position.control(), position.inner_.generation(), + position.inner_.generation_ptr(), "extract()"); + auto node = CommonAccess::Transfer(alloc_ref(), position.slot()); + erase_meta_only(position); + return node; + } + + template < + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) { + auto it = find(key); + return it == end() ? node_type() : extract(const_iterator{it}); + } + + void swap(raw_hash_set& that) noexcept( + IsNoThrowSwappable() && IsNoThrowSwappable() && + IsNoThrowSwappable( + typename AllocTraits::propagate_on_container_swap{})) { + using std::swap; + swap(common(), that.common()); + swap(hash_ref(), that.hash_ref()); + swap(eq_ref(), that.eq_ref()); + SwapAlloc(alloc_ref(), that.alloc_ref(), + typename AllocTraits::propagate_on_container_swap{}); + } + + void rehash(size_t n) { + if (n == 0 && capacity() == 0) return; + if (n == 0 && size() == 0) { + ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false); + return; + } + + // bitor is a faster way of doing `max` here. We will round up to the next + // power-of-2-minus-1, so bitor is good enough. + auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); + // n == 0 unconditionally rehashes as per the standard. + if (n == 0 || m > capacity()) { + resize(m); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + } + + void reserve(size_t n) { + if (n > size() + growth_left()) { + size_t m = GrowthToLowerboundCapacity(n); + resize(NormalizeCapacity(m)); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + common().reset_reserved_growth(n); + common().set_reservation_size(n); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. 
+ // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + template + size_t count(const key_arg& key) const { + return find(key) == end() ? 0 : 1; + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. + // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. + template + void prefetch(const key_arg& key) const { + (void)key; + // Avoid probing if we won't be able to prefetch the addresses received. +#ifdef ABSL_HAVE_PREFETCH + prefetch_heap_block(); + auto seq = probe(common(), hash_ref()(key)); + PrefetchToLocalCache(control() + seq.offset()); + PrefetchToLocalCache(slot_array() + seq.offset()); +#endif // ABSL_HAVE_PREFETCH + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. + template + iterator find(const key_arg& key, + size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto seq = probe(common(), hash); + slot_type* slot_ptr = slot_array(); + const ctrl_t* ctrl = control(); + while (true) { + Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slot_ptr + seq.offset(i))))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); + seq.next(); + assert(seq.index() <= capacity() && "full table!"); + } + } + template + iterator find(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + const_iterator find(const key_arg& key, + size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return const_cast(this)->find(key, hash); + } + template + const_iterator find(const key_arg& key) const + ABSL_ATTRIBUTE_LIFETIME_BOUND { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + bool contains(const key_arg& key) const { + // Here neither the iterator returned by `find()` nor `end()` can be invalid + // outside of potential thread-safety issues. + // `find()`'s return value is constructed, used, and then destructed + // all in this context. + return !find(key).unchecked_equals(end()); + } + + template + std::pair equal_range(const key_arg& key) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + template + std::pair equal_range( + const key_arg& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const { return capacity(); } + float load_factor() const { + return capacity() ? static_cast(size()) / capacity() : 0.0; + } + float max_load_factor() const { return 1.0f; } + void max_load_factor(float) { + // Does nothing. 
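+    // (Editorial note, not upstream text.) This no-op exists for API
+    // compatibility with std::unordered_set. SwissTable's maximum load
+    // factor is fixed at 7/8: the table grows once size() would exceed
+    // roughly capacity() - capacity() / 8, regardless of this setter.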
+ } + + hasher hash_function() const { return hash_ref(); } + key_equal key_eq() const { return eq_ref(); } + allocator_type get_allocator() const { return alloc_ref(); } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { + if (a.size() != b.size()) return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) std::swap(outer, inner); + for (const value_type& elem : *outer) { + auto it = PolicyTraits::apply(FindElement{*inner}, elem); + if (it == inner->end() || !(*it == elem)) return false; + } + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { + return !(a == b); + } + + template + friend typename std::enable_if::value, + H>::type + AbslHashValue(H h, const raw_hash_set& s) { + return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), + s.size()); + } + + friend void swap(raw_hash_set& a, + raw_hash_set& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); + } + + private: + template + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + struct FindElement { + template + const_iterator operator()(const K& key, Args&&...) const { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement { + template + size_t operator()(const K& key, Args&&...) const { + return h(key); + } + const hasher& h; + }; + + template + struct EqualElement { + template + bool operator()(const K2& lhs, Args&&...) const { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + struct EmplaceDecomposable { + template + std::pair operator()(const K& key, Args&&... args) const { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + s.emplace_at(res.first, std::forward(args)...); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + }; + + template + struct InsertSlot { + template + std::pair operator()(const K& key, Args&&...) && { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + s.transfer(s.slot_array() + res.first, &slot); + } else if (do_destroy) { + s.destroy(&slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. + slot_type&& slot; + }; + + // TODO(b/303305702): re-enable reentrant validation. + template + inline void construct(slot_type* slot, Args&&... args) { + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + } + inline void destroy(slot_type* slot) { + PolicyTraits::destroy(&alloc_ref(), slot); + } + inline void transfer(slot_type* to, slot_type* from) { + PolicyTraits::transfer(&alloc_ref(), to, from); + } + + inline void destroy_slots() { + const size_t cap = capacity(); + const ctrl_t* ctrl = control(); + slot_type* slot = slot_array(); + for (size_t i = 0; i != cap; ++i) { + if (IsFull(ctrl[i])) { + destroy(slot + i); + } + } + } + + inline void dealloc() { + assert(capacity() != 0); + // Unpoison before returning the memory to the allocator. + SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity()); + infoz().Unregister(); + Deallocate( + &alloc_ref(), common().backing_array_start(), + common().alloc_size(sizeof(slot_type), alignof(slot_type))); + } + + inline void destructor_impl() { + if (capacity() == 0) return; + destroy_slots(); + dealloc(); + } + + // Erases, but does not destroy, the value pointed to by `it`. + // + // This merely updates the pertinent control byte. 
This can be used in conjunction with Policy::transfer to move the object to
+  // another place.
+  void erase_meta_only(const_iterator it) {
+    EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
+                  sizeof(slot_type));
+  }
+
+  // Resizes the table to the new capacity and moves all elements to the new
+  // positions accordingly.
+  //
+  // Note that for better performance, instead of
+  //   find_first_non_full(common(), hash),
+  //   HashSetResizeHelper::FindFirstNonFullAfterResize(
+  //       common(), old_capacity, hash)
+  // can be called right after `resize`.
+  ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
+    assert(IsValidCapacity(new_capacity));
+    HashSetResizeHelper resize_helper(common());
+    auto* old_slots = slot_array();
+    common().set_capacity(new_capacity);
+    // Note that `InitializeSlots` performs a different number of
+    // initialization steps depending on the values of `transfer_uses_memcpy`
+    // and the capacities. Refer to the comment in `InitializeSlots` for more
+    // details.
+    const bool grow_single_group =
+        resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
+                                      PolicyTraits::transfer_uses_memcpy(),
+                                      alignof(slot_type)>(
+            common(), const_cast<slot_type*>(old_slots),
+            CharAlloc(alloc_ref()));
+
+    if (resize_helper.old_capacity() == 0) {
+      // InitializeSlots did all the work including infoz().RecordRehash().
+      return;
+    }
+
+    if (grow_single_group) {
+      if (PolicyTraits::transfer_uses_memcpy()) {
+        // InitializeSlots did all the work.
+        return;
+      }
+      // We want GrowSizeIntoSingleGroup to be called here in order to make
+      // InitializeSlots not depend on PolicyTraits.
+      resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(),
+                                                          alloc_ref(),
+                                                          old_slots);
+    } else {
+      // InitializeSlots prepares control bytes to correspond to an empty
+      // table.
+      auto* new_slots = slot_array();
+      size_t total_probe_length = 0;
+      for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
+        if (IsFull(resize_helper.old_ctrl()[i])) {
+          size_t hash = PolicyTraits::apply(
+              HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
+          auto target = find_first_non_full(common(), hash);
+          size_t new_i = target.offset;
+          total_probe_length += target.probe_length;
+          SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
+          transfer(new_slots + new_i, old_slots + i);
+        }
+      }
+      infoz().RecordRehash(total_probe_length);
+    }
+    resize_helper.DeallocateOld<alignof(slot_type)>(
+        CharAlloc(alloc_ref()), sizeof(slot_type),
+        const_cast<slot_type*>(old_slots));
+  }
+
+  // Prunes control bytes to remove as many tombstones as possible.
+  //
+  // See the comment on `rehash_and_grow_if_necessary()`.
+  inline void drop_deletes_without_resize() {
+    // Stack-allocate space for swapping elements.
+    alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
+    DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
+  }
+
+  // Called whenever the table *might* need to conditionally grow.
+  //
+  // This function is an optimization opportunity to perform a rehash even
+  // when growth is unnecessary, because vacating tombstones is beneficial
+  // for performance in the long run.
+  void rehash_and_grow_if_necessary() {
+    const size_t cap = capacity();
+    if (cap > Group::kWidth &&
+        // Do these calculations in 64-bit to avoid overflow.
+        size() * uint64_t{32} <= cap * uint64_t{25}) {
+      // Squash DELETED without growing if there is enough capacity.
+      //
+      // Rehash in place if the current size is <= 25/32 of capacity.
+      // Rationale for such a high factor: 1) drop_deletes_without_resize()
+      // is faster than resize, and 2) it takes quite a bit of work to add
+      // tombstones.
+      // In the worst case, it seems to take approximately 4 insert/erase
+      // pairs to create a single tombstone, so if we are rehashing because
+      // of tombstones, we can afford to rehash-in-place as long as we are
+      // reclaiming at least 1/8 the capacity without doing more than 2X the
+      // work. (Where "work" is defined to be size() for rehashing or
+      // rehashing in place, and 1 for an insert or erase.) But rehashing in
+      // place is faster per operation than inserting or even doubling the
+      // size of the table, so we can actually afford to reclaim even less
+      // space from a resize-in-place. The decision is to rehash in place if
+      // we can reclaim about 1/8th of the usable capacity (specifically 3/28
+      // of the capacity), which means that the total cost of rehashing will
+      // be a small fraction of the total work.
+      //
+      // Here is the output of an experiment using the BM_CacheInSteadyState
+      // benchmark running the old case (where we rehash-in-place only if we
+      // can reclaim at least 7/16*capacity) vs. this code (which rehashes in
+      // place if we can recover 3/32*capacity).
+      //
+      // Note that although in the worst case the number of rehashes jumped
+      // up from 15 to 190, the number of operations per second is almost the
+      // same.
+      //
+      // Abridged output of running BM_CacheInSteadyState benchmark from
+      // raw_hash_set_benchmark. N is the number of insert/erase operations.
+      //
+      //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
+      // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
+      //  448 | 145284       0.44        18 | 140118       0.44        19
+      //  493 | 152546       0.24        11 | 151417       0.48        28
+      //  538 | 151439       0.26        11 | 151152       0.53        38
+      //  583 | 151765       0.28        11 | 150572       0.57        50
+      //  628 | 150241       0.31        11 | 150853       0.61        66
+      //  672 | 149602       0.33        12 | 150110       0.66        90
+      //  717 | 149998       0.35        12 | 149531       0.70       129
+      //  762 | 149836       0.37        13 | 148559       0.74       190
+      //  807 | 149736       0.39        14 | 151107       0.39        14
+      //  852 | 150204       0.42        15 | 151019       0.42        15
+      drop_deletes_without_resize();
+    } else {
+      // Otherwise grow the container.
+      resize(NextCapacity(cap));
+    }
+  }
+
+  void maybe_increment_generation_or_rehash_on_move() {
+    common().maybe_increment_generation_on_move();
+    if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
+      resize(capacity());
+    }
+  }
+
+  template <bool propagate_alloc>
+  raw_hash_set& assign_impl(raw_hash_set&& that) {
+    // We don't bother checking for this/that aliasing. We just need to avoid
+    // breaking the invariants in that case.
+    destructor_impl();
+    common() = std::move(that.common());
+    // TODO(b/296061262): move instead of copying hash/eq/alloc.
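+    // (Editorial illustration, not upstream text.) propagate_alloc controls
+    // whether the allocator object itself is carried over:
+    //
+    //   Table t1(0, Alloc(1)), t2(0, Alloc(2));
+    //   t2 = t1;  // propagate_on_container_copy_assignment == true:
+    //             //   t2 ends up using Alloc(1);
+    //             // false: t2 keeps Alloc(2), only elements are copied.
+    //
+    // (Table/Alloc as in the allocator tests later in this patch.)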
+ hash_ref() = that.hash_ref(); + eq_ref() = that.eq_ref(); + CopyAlloc(alloc_ref(), that.alloc_ref(), + std::integral_constant()); + that.common() = CommonFields{}; + maybe_increment_generation_or_rehash_on_move(); + return *this; + } + + raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) { + const size_t size = that.size(); + if (size == 0) return *this; + reserve(size); + for (iterator it = that.begin(); it != that.end(); ++it) { + insert(std::move(PolicyTraits::element(it.slot()))); + that.destroy(it.slot()); + } + that.dealloc(); + that.common() = CommonFields{}; + maybe_increment_generation_or_rehash_on_move(); + return *this; + } + + raw_hash_set& move_assign(raw_hash_set&& that, + std::true_type /*propagate_alloc*/) { + return assign_impl(std::move(that)); + } + raw_hash_set& move_assign(raw_hash_set&& that, + std::false_type /*propagate_alloc*/) { + if (alloc_ref() == that.alloc_ref()) { + return assign_impl(std::move(that)); + } + // Aliasing can't happen here because allocs would compare equal above. + assert(this != &that); + destructor_impl(); + // We can't take over that's memory so we need to move each element. + // While moving elements, this should have that's hash/eq so copy hash/eq + // before moving elements. + // TODO(b/296061262): move instead of copying hash/eq. + hash_ref() = that.hash_ref(); + eq_ref() = that.eq_ref(); + return move_elements_allocs_unequal(std::move(that)); + } + + protected: + // Attempts to find `key` in the table; if it isn't found, returns a slot that + // the value can be inserted into, with the control byte already set to + // `key`'s H2. + template + std::pair find_or_prepare_insert(const K& key) { + prefetch_heap_block(); + auto hash = hash_ref()(key); + auto seq = probe(common(), hash); + const ctrl_t* ctrl = control(); + while (true) { + Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slot_array() + seq.offset(i))))) + return {seq.offset(i), false}; + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break; + seq.next(); + assert(seq.index() <= capacity() && "full table!"); + } + return {prepare_insert(hash), true}; + } + + // Given the hash of a value not currently in the table, finds the next + // viable slot index to insert it at. + // + // REQUIRES: At least one non-full slot available. + size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { + const bool rehash_for_bug_detection = + common().should_rehash_for_bug_detection_on_insert(); + if (rehash_for_bug_detection) { + // Move to a different heap allocation in order to detect bugs. + const size_t cap = capacity(); + resize(growth_left() > 0 ? cap : NextCapacity(cap)); + } + auto target = find_first_non_full(common(), hash); + if (!rehash_for_bug_detection && + ABSL_PREDICT_FALSE(growth_left() == 0 && + !IsDeleted(control()[target.offset]))) { + size_t old_capacity = capacity(); + rehash_and_grow_if_necessary(); + // NOTE: It is safe to use `FindFirstNonFullAfterResize`. + // `FindFirstNonFullAfterResize` must be called right after resize. + // `rehash_and_grow_if_necessary` may *not* call `resize` + // and perform `drop_deletes_without_resize` instead. But this + // could happen only on big tables. + // For big tables `FindFirstNonFullAfterResize` will always + // fallback to normal `find_first_non_full`, so it is safe to use it. 
+ target = HashSetResizeHelper::FindFirstNonFullAfterResize( + common(), old_capacity, hash); + } + common().increment_size(); + set_growth_left(growth_left() - IsEmpty(control()[target.offset])); + SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type)); + common().maybe_increment_generation_on_insert(); + infoz().RecordInsert(hash, target.probe_length); + return target.offset; + } + + // Constructs the value in the space pointed by the iterator. This only works + // after an unsuccessful find_or_prepare_insert() and before any other + // modifications happen in the raw_hash_set. + // + // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where + // k is the key decomposed from `forward(args)...`, and the bool + // returned by find_or_prepare_insert(k) was true. + // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). + template + void emplace_at(size_t i, Args&&... args) { + construct(slot_array() + i, std::forward(args)...); + + assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == + iterator_at(i) && + "constructed value does not match the lookup key"); + } + + iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND { + return {control() + i, slot_array() + i, common().generation_ptr()}; + } + const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return {control() + i, slot_array() + i, common().generation_ptr()}; + } + + reference unchecked_deref(iterator it) { return it.unchecked_deref(); } + + private: + friend struct RawHashSetTestOnlyAccess; + + // The number of slots we can still fill without needing to rehash. + // + // This is stored separately due to tombstones: we do not include tombstones + // in the growth capacity, because we'd like to rehash when the table is + // otherwise filled with tombstones: otherwise, probe sequences might get + // unacceptably long without triggering a rehash. Callers can also force a + // rehash via the standard `rehash(0)`, which will recompute this value as a + // side-effect. + // + // See `CapacityToGrowth()`. + size_t growth_left() const { return common().growth_left(); } + void set_growth_left(size_t gl) { return common().set_growth_left(gl); } + + // Prefetch the heap-allocated memory region to resolve potential TLB and + // cache misses. This is intended to overlap with execution of calculating the + // hash for a key. + void prefetch_heap_block() const { +#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + __builtin_prefetch(control(), 0, 1); +#endif + } + + CommonFields& common() { return settings_.template get<0>(); } + const CommonFields& common() const { return settings_.template get<0>(); } + + ctrl_t* control() const { return common().control(); } + slot_type* slot_array() const { + return static_cast(common().slot_array()); + } + HashtablezInfoHandle infoz() { return common().infoz(); } + + hasher& hash_ref() { return settings_.template get<1>(); } + const hasher& hash_ref() const { return settings_.template get<1>(); } + key_equal& eq_ref() { return settings_.template get<2>(); } + const key_equal& eq_ref() const { return settings_.template get<2>(); } + allocator_type& alloc_ref() { return settings_.template get<3>(); } + const allocator_type& alloc_ref() const { + return settings_.template get<3>(); + } + + // Make type-specific functions for this type's PolicyFunctions struct. 
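+  // (Editorial sketch, not upstream text.) Why the type erasure pays off:
+  // two instantiations whose slots happen to have the same layout, e.g.
+  //
+  //   raw_hash_set<PolicyA, HashA, EqA, std::allocator<A>>  // 8-byte slot
+  //   raw_hash_set<PolicyB, HashB, EqB, std::allocator<B>>  // 8-byte slot
+  //
+  // can share one TransferRelocatable<8> body (same slot size, both
+  // trivially relocatable) and one DeallocateStandard<8> body (same slot
+  // alignment); only hash_slot_fn below must remain per-type.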
+ static size_t hash_slot_fn(void* set, void* slot) { + auto* h = static_cast(set); + return PolicyTraits::apply( + HashElement{h->hash_ref()}, + PolicyTraits::element(static_cast(slot))); + } + static void transfer_slot_fn(void* set, void* dst, void* src) { + auto* h = static_cast(set); + h->transfer(static_cast(dst), static_cast(src)); + } + // Note: dealloc_fn will only be used if we have a non-standard allocator. + static void dealloc_fn(CommonFields& common, const PolicyFunctions&) { + auto* set = reinterpret_cast(&common); + + // Unpoison before returning the memory to the allocator. + SanitizerUnpoisonMemoryRegion(common.slot_array(), + sizeof(slot_type) * common.capacity()); + + common.infoz().Unregister(); + Deallocate( + &set->alloc_ref(), common.backing_array_start(), + common.alloc_size(sizeof(slot_type), alignof(slot_type))); + } + + static const PolicyFunctions& GetPolicyFunctions() { + static constexpr PolicyFunctions value = { + sizeof(slot_type), + &raw_hash_set::hash_slot_fn, + PolicyTraits::transfer_uses_memcpy() + ? TransferRelocatable + : &raw_hash_set::transfer_slot_fn, + (std::is_same>::value + ? &DeallocateStandard + : &raw_hash_set::dealloc_fn), + }; + return value; + } + + // Bundle together CommonFields plus other objects which might be empty. + // CompressedTuple will ensure that sizeof is not affected by any of the empty + // fields that occur after CommonFields. + absl::container_internal::CompressedTuple + settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}}; +}; + +// Erases all elements that satisfy the predicate `pred` from the container `c`. +template +typename raw_hash_set::size_type EraseIf( + Predicate& pred, raw_hash_set* c) { + const auto initial_size = c->size(); + for (auto it = c->begin(), last = c->end(); it != last;) { + if (pred(*it)) { + c->erase(it++); + } else { + ++it; + } + } + return initial_size - c->size(); +} + +namespace hashtable_debug_internal { +template +struct HashtableDebugAccess> { + using Traits = typename Set::PolicyTraits; + using Slot = typename Traits::slot_type; + + static size_t GetNumProbes(const Set& set, + const typename Set::key_type& key) { + size_t num_probes = 0; + size_t hash = set.hash_ref()(key); + auto seq = probe(set.common(), hash); + const ctrl_t* ctrl = set.control(); + while (true) { + container_internal::Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(container_internal::H2(hash))) { + if (Traits::apply( + typename Set::template EqualElement{ + key, set.eq_ref()}, + Traits::element(set.slot_array() + seq.offset(i)))) + return num_probes; + ++num_probes; + } + if (g.MaskEmpty()) return num_probes; + seq.next(); + ++num_probes; + } + } + + static size_t AllocatedByteSize(const Set& c) { + size_t capacity = c.capacity(); + if (capacity == 0) return 0; + size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot)); + + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * c.size(); + } else { + for (auto it = c.begin(); it != c.end(); ++it) { + m += Traits::space_used(it.slot()); + } + } + return m; + } +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ diff --git a/third_party/absl/absl/container/internal/raw_hash_set_allocator_test.cc b/third_party/absl/absl/container/internal/raw_hash_set_allocator_test.cc new file mode 100644 index 000000000..05dcfaa61 --- 
/dev/null +++ b/third_party/absl/absl/container/internal/raw_hash_set_allocator_test.cc @@ -0,0 +1,514 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/container/internal/tracked.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { +using ::testing::AnyOf; + +enum AllocSpec { + kPropagateOnCopy = 1, + kPropagateOnMove = 2, + kPropagateOnSwap = 4, +}; + +struct AllocState { + size_t num_allocs = 0; + std::set owned; +}; + +template +class CheckedAlloc { + public: + template + friend class CheckedAlloc; + + using value_type = T; + + CheckedAlloc() {} + explicit CheckedAlloc(size_t id) : id_(id) {} + CheckedAlloc(const CheckedAlloc&) = default; + CheckedAlloc& operator=(const CheckedAlloc&) = default; + + template + CheckedAlloc(const CheckedAlloc& that) + : id_(that.id_), state_(that.state_) {} + + template + struct rebind { + using other = CheckedAlloc; + }; + + using propagate_on_container_copy_assignment = + std::integral_constant; + + using propagate_on_container_move_assignment = + std::integral_constant; + + using propagate_on_container_swap = + std::integral_constant; + + CheckedAlloc select_on_container_copy_construction() const { + if (Spec & kPropagateOnCopy) return *this; + return {}; + } + + T* allocate(size_t n) { + T* ptr = std::allocator().allocate(n); + track_alloc(ptr); + return ptr; + } + void deallocate(T* ptr, size_t n) { + memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned. 
+ track_dealloc(ptr); + return std::allocator().deallocate(ptr, n); + } + + friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) { + return !(a == b); + } + + size_t num_allocs() const { return state_->num_allocs; } + + void swap(CheckedAlloc& that) { + using std::swap; + swap(id_, that.id_); + swap(state_, that.state_); + } + + friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); } + + friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) { + return o << "alloc(" << a.id_ << ")"; + } + + private: + void track_alloc(void* ptr) { + AllocState* state = state_.get(); + ++state->num_allocs; + if (!state->owned.insert(ptr).second) + ADD_FAILURE() << *this << " got previously allocated memory: " << ptr; + } + void track_dealloc(void* ptr) { + if (state_->owned.erase(ptr) != 1) + ADD_FAILURE() << *this + << " deleting memory owned by another allocator: " << ptr; + } + + size_t id_ = std::numeric_limits::max(); + + std::shared_ptr state_ = std::make_shared(); +}; + +struct Identity { + int32_t operator()(int32_t v) const { return v; } +}; + +struct Policy { + using slot_type = Tracked; + using init_type = Tracked; + using key_type = int32_t; + + template + static void construct(allocator_type* alloc, slot_type* slot, + Args&&... args) { + std::allocator_traits::construct( + *alloc, slot, std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + template + static auto apply(F&& f, int32_t v) -> decltype(std::forward(f)(v, v)) { + return std::forward(f)(v, v); + } + + template + static auto apply(F&& f, const slot_type& v) + -> decltype(std::forward(f)(v.val(), v)) { + return std::forward(f)(v.val(), v); + } + + template + static auto apply(F&& f, slot_type&& v) + -> decltype(std::forward(f)(v.val(), std::move(v))) { + return std::forward(f)(v.val(), std::move(v)); + } + + static slot_type& element(slot_type* slot) { return *slot; } +}; + +template +struct PropagateTest : public ::testing::Test { + using Alloc = CheckedAlloc, Spec>; + + using Table = raw_hash_set, Alloc>; + + PropagateTest() { + EXPECT_EQ(a1, t1.get_allocator()); + EXPECT_NE(a2, t1.get_allocator()); + } + + Alloc a1 = Alloc(1); + Table t1 = Table(0, a1); + Alloc a2 = Alloc(2); +}; + +using PropagateOnAll = + PropagateTest; +using NoPropagateOnCopy = PropagateTest; +using NoPropagateOnMove = PropagateTest; + +TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); } + +TEST_F(PropagateOnAll, InsertAllocates) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, InsertDecomposes) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); + + EXPECT_FALSE(t1.insert(0).second); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, RehashMoves) { + auto it = t1.insert(0).first; + EXPECT_EQ(0, it->num_moves()); + t1.rehash(2 * t1.capacity()); + EXPECT_EQ(2, a1.num_allocs()); + it = t1.find(0); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, 
+
+TEST_F(PropagateOnAll, CopyConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(t1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructor) {
+  auto it = t1.insert(0).first;
+  Table u(t1);
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, u.get_allocator().num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a1);
+  EXPECT_EQ(2, a1.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a2);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(t1, a2);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(1, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructor) {
+  t1.insert(0);
+  Table u(std::move(t1));
+  auto it = u.begin();
+  EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructor) {
+  t1.insert(0);
+  Table u(std::move(t1));
+  auto it = u.begin();
+  EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
+  t1.insert(0);
+  Table u(std::move(t1), a1);
+  auto it = u.begin();
+  EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
+  t1.insert(0);
+  Table u(std::move(t1), a1);
+  auto it = u.begin();
+  EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a2);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(1, 2));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(std::move(t1), a2);
+  it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(1, 2));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = t1;
+  EXPECT_THAT(a1.num_allocs(), AnyOf(2, 3));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a1);
+  u = t1;
+  EXPECT_THAT(a1.num_allocs(), AnyOf(2, 3));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = t1;
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_THAT(a1.num_allocs(), AnyOf(2, 3));
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u = t1;
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
+  t1.insert(0);
+  Table u(0, a1);
+  u = std::move(t1);
+  auto it = u.begin();
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
+  t1.insert(0);
+  Table u(0, a1);
+  u = std::move(t1);
+  auto it = u.begin();
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
+  t1.insert(0);
+  Table u(0, a2);
+  u = std::move(t1);
+  auto it = u.begin();
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
+  t1.insert(0);
+  Table u(0, a2);
+  u = std::move(t1);
+  auto it = u.find(0);
+  EXPECT_EQ(a2, u.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+  EXPECT_THAT(it->num_moves(), AnyOf(1, 2));
+  EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, Swap) {
+  auto it = t1.insert(0).first;
+  Table u(0, a2);
+  u.swap(t1);
+  EXPECT_EQ(a1, u.get_allocator());
+  EXPECT_EQ(a2, t1.get_allocator());
+  EXPECT_EQ(1, a1.num_allocs());
+  EXPECT_EQ(0, a2.num_allocs());
+  EXPECT_EQ(0, it->num_moves());
+  EXPECT_EQ(0, it->num_copies());
+}
+
+// This allocator is similar to std::pmr::polymorphic_allocator.
+// Note the disabled assignment.
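+//
+// Because copy assignment is deleted and select_on_container_copy_construction
+// returns a default-constructed allocator, a table can never adopt this
+// allocator from another table; each container keeps the allocator it was
+// constructed with and copies or moves elements slot by slot instead. A
+// sketch of the behavior the tests below pin down:
+//
+//   using PA = PAlloc<char>;
+//   using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+//   Table t1(PA{1}), t2(PA{2});
+//   t1 = t2;       // t1 still reports PA(1) from get_allocator().
+//   Table t3(t1);  // t3 gets a default-constructed PA(), not PA(1).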
+template <class T>
+class PAlloc {
+  template <class>
+  friend class PAlloc;
+
+ public:
+  // types
+  using value_type = T;
+
+  PAlloc() noexcept = default;
+  explicit PAlloc(size_t id) noexcept : id_(id) {}
+  PAlloc(const PAlloc&) noexcept = default;
+  PAlloc& operator=(const PAlloc&) noexcept = delete;
+
+  template <class U>
+  PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {}  // NOLINT
+
+  template <class U>
+  struct rebind {
+    using other = PAlloc<U>;
+  };
+
+  constexpr PAlloc select_on_container_copy_construction() const { return {}; }
+
+  // public member functions
+  T* allocate(size_t) { return new T; }
+  void deallocate(T* p, size_t) { delete p; }
+
+  friend bool operator==(const PAlloc& a, const PAlloc& b) {
+    return a.id_ == b.id_;
+  }
+  friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
+
+ private:
+  size_t id_ = std::numeric_limits<size_t>::max();
+};
+
+TEST(NoPropagateDeletedAssignment, CopyConstruct) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(t1);
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA());
+}
+
+TEST(NoPropagateDeletedAssignment, CopyAssignment) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(PA{2});
+  t1 = t2;
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+  EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
+TEST(NoPropagateDeletedAssignment, MoveAssignment) {
+  using PA = PAlloc<char>;
+  using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+  Table t1(PA{1}), t2(PA{2});
+  t1 = std::move(t2);
+  EXPECT_EQ(t1.get_allocator(), PA(1));
+}
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/third_party/absl/absl/container/internal/raw_hash_set_benchmark.cc b/third_party/absl/absl/container/internal/raw_hash_set_benchmark.cc
new file mode 100644
index 000000000..88b073738
--- /dev/null
+++ b/third_party/absl/absl/container/internal/raw_hash_set_benchmark.cc
@@ -0,0 +1,576 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/strings/str_format.h" +#include "benchmark/benchmark.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +struct RawHashSetTestOnlyAccess { + template + static auto GetSlots(const C& c) -> decltype(c.slots_) { + return c.slots_; + } +}; + +namespace { + +struct IntPolicy { + using slot_type = int64_t; + using key_type = int64_t; + using init_type = int64_t; + + static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } + static void destroy(void*, int64_t*) {} + static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { + *new_slot = *old_slot; + } + + static int64_t& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, int64_t x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); + } +}; + +class StringPolicy { + template ::value>::type> + decltype(std::declval()( + std::declval(), std::piecewise_construct, + std::declval>(), + std::declval())) static apply_impl(F&& f, + std::pair, V> p) { + const absl::string_view& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); + } + + public: + struct slot_type { + struct ctor {}; + + template + slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} + + std::pair pair; + }; + + using key_type = std::string; + using init_type = std::pair; + + template + static void construct(allocator_type* alloc, slot_type* slot, Args... args) { + std::allocator_traits::construct( + *alloc, slot, typename slot_type::ctor(), std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(old_slot->pair)); + destroy(alloc, old_slot); + } + + static std::pair& element(slot_type* slot) { + return slot->pair; + } + + template + static auto apply(F&& f, Args&&... args) + -> decltype(apply_impl(std::forward(f), + PairArgs(std::forward(args)...))) { + return apply_impl(std::forward(f), + PairArgs(std::forward(args)...)); + } +}; + +struct StringHash : container_internal::hash_default_hash { + using is_transparent = void; +}; +struct StringEq : std::equal_to { + using is_transparent = void; +}; + +struct StringTable + : raw_hash_set> { + using Base = typename StringTable::raw_hash_set; + StringTable() {} + using Base::Base; +}; + +struct IntTable + : raw_hash_set, + std::equal_to, std::allocator> { + using Base = typename IntTable::raw_hash_set; + IntTable() {} + using Base::Base; +}; + +struct string_generator { + template + std::string operator()(RNG& rng) const { + std::string res; + res.resize(size); + std::uniform_int_distribution printable_ascii(0x20, 0x7E); + std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); }); + return res; + } + + size_t size; +}; + +// Model a cache in steady state. +// +// On a table of size N, keep deleting the LRU entry and add a random one. 
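+//
+// Each iteration does 90 hit lookups and 10 (almost certainly) miss lookups,
+// then erases the oldest key and inserts a fresh one, so the measured time
+// mixes lookup and mutation costs at a steady-state load factor (reported via
+// the benchmark label).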
+void BM_CacheInSteadyState(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + string_generator gen{12}; + StringTable t; + std::deque keys; + while (t.size() < state.range(0)) { + auto x = t.emplace(gen(rng), gen(rng)); + if (x.second) keys.push_back(x.first->first); + } + ABSL_RAW_CHECK(state.range(0) >= 10, ""); + while (state.KeepRunning()) { + // Some cache hits. + std::deque::const_iterator it; + for (int i = 0; i != 90; ++i) { + if (i % 10 == 0) it = keys.end(); + ::benchmark::DoNotOptimize(t.find(*--it)); + } + // Some cache misses. + for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng))); + ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str()); + keys.pop_front(); + while (true) { + auto x = t.emplace(gen(rng), gen(rng)); + if (x.second) { + keys.push_back(x.first->first); + break; + } + } + } + state.SetItemsProcessed(state.iterations()); + state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor())); +} + +template +void CacheInSteadyStateArgs(Benchmark* bm) { + // The default. + const float max_load_factor = 0.875; + // When the cache is at the steady state, the probe sequence will equal + // capacity if there is no reclamation of deleted slots. Pick a number large + // enough to make the benchmark slow for that case. + const size_t capacity = 1 << 10; + + // Check N data points to cover load factors in [0.4, 0.8). + const size_t kNumPoints = 10; + for (size_t i = 0; i != kNumPoints; ++i) + bm->Arg(std::ceil( + capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2)); +} +BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs); + +void BM_EraseEmplace(benchmark::State& state) { + IntTable t; + int64_t size = state.range(0); + for (int64_t i = 0; i < size; ++i) { + t.emplace(i); + } + while (state.KeepRunningBatch(size)) { + for (int64_t i = 0; i < size; ++i) { + benchmark::DoNotOptimize(t); + t.erase(i); + t.emplace(i); + } + } +} +BENCHMARK(BM_EraseEmplace)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(16)->Arg(100); + +void BM_EndComparison(benchmark::State& state) { + StringTable t = {{"a", "a"}, {"b", "b"}}; + auto it = t.begin(); + for (auto i : state) { + benchmark::DoNotOptimize(t); + benchmark::DoNotOptimize(it); + benchmark::DoNotOptimize(it != t.end()); + } +} +BENCHMARK(BM_EndComparison); + +void BM_Iteration(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + string_generator gen{12}; + StringTable t; + + size_t capacity = state.range(0); + size_t size = state.range(1); + t.reserve(capacity); + + while (t.size() < size) { + t.emplace(gen(rng), gen(rng)); + } + + for (auto i : state) { + benchmark::DoNotOptimize(t); + for (auto it = t.begin(); it != t.end(); ++it) { + benchmark::DoNotOptimize(*it); + } + } +} + +BENCHMARK(BM_Iteration) + ->ArgPair(1, 1) + ->ArgPair(2, 2) + ->ArgPair(4, 4) + ->ArgPair(7, 7) + ->ArgPair(10, 10) + ->ArgPair(15, 15) + ->ArgPair(16, 16) + ->ArgPair(54, 54) + ->ArgPair(100, 100) + ->ArgPair(400, 400) + // empty + ->ArgPair(0, 0) + ->ArgPair(10, 0) + ->ArgPair(100, 0) + ->ArgPair(1000, 0) + ->ArgPair(10000, 0) + // sparse + ->ArgPair(100, 1) + ->ArgPair(1000, 10); + +void BM_CopyCtorSparseInt(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + IntTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + + size_t size = state.range(0); + t.reserve(size * 10); + while (t.size() < size) { + t.emplace(dist(rng)); + } + + for (auto i : state) { + IntTable t2 = t; + benchmark::DoNotOptimize(t2); + } +} 
+BENCHMARK(BM_CopyCtorSparseInt)->Range(128, 4096); + +void BM_CopyCtorInt(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + IntTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + + size_t size = state.range(0); + while (t.size() < size) { + t.emplace(dist(rng)); + } + + for (auto i : state) { + IntTable t2 = t; + benchmark::DoNotOptimize(t2); + } +} +BENCHMARK(BM_CopyCtorInt)->Range(128, 4096); + +void BM_CopyCtorString(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + StringTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + + size_t size = state.range(0); + while (t.size() < size) { + t.emplace(std::to_string(dist(rng)), std::to_string(dist(rng))); + } + + for (auto i : state) { + StringTable t2 = t; + benchmark::DoNotOptimize(t2); + } +} +BENCHMARK(BM_CopyCtorString)->Range(128, 4096); + +void BM_CopyAssign(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + IntTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + while (t.size() < state.range(0)) { + t.emplace(dist(rng)); + } + + IntTable t2; + for (auto _ : state) { + t2 = t; + benchmark::DoNotOptimize(t2); + } +} +BENCHMARK(BM_CopyAssign)->Range(128, 4096); + +void BM_RangeCtor(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + std::uniform_int_distribution dist(0, ~uint64_t{}); + std::vector values; + const size_t desired_size = state.range(0); + while (values.size() < desired_size) { + values.emplace_back(dist(rng)); + } + + for (auto unused : state) { + IntTable t{values.begin(), values.end()}; + benchmark::DoNotOptimize(t); + } +} +BENCHMARK(BM_RangeCtor)->Range(128, 65536); + +void BM_NoOpReserveIntTable(benchmark::State& state) { + IntTable t; + t.reserve(100000); + for (auto _ : state) { + benchmark::DoNotOptimize(t); + t.reserve(100000); + } +} +BENCHMARK(BM_NoOpReserveIntTable); + +void BM_NoOpReserveStringTable(benchmark::State& state) { + StringTable t; + t.reserve(100000); + for (auto _ : state) { + benchmark::DoNotOptimize(t); + t.reserve(100000); + } +} +BENCHMARK(BM_NoOpReserveStringTable); + +void BM_ReserveIntTable(benchmark::State& state) { + constexpr size_t kBatchSize = 1024; + size_t reserve_size = static_cast(state.range(0)); + + std::vector tables; + while (state.KeepRunningBatch(kBatchSize)) { + state.PauseTiming(); + tables.clear(); + tables.resize(kBatchSize); + state.ResumeTiming(); + for (auto& t : tables) { + benchmark::DoNotOptimize(t); + t.reserve(reserve_size); + benchmark::DoNotOptimize(t); + } + } +} +BENCHMARK(BM_ReserveIntTable)->Range(1, 64); + +void BM_ReserveStringTable(benchmark::State& state) { + constexpr size_t kBatchSize = 1024; + size_t reserve_size = static_cast(state.range(0)); + + std::vector tables; + while (state.KeepRunningBatch(kBatchSize)) { + state.PauseTiming(); + tables.clear(); + tables.resize(kBatchSize); + state.ResumeTiming(); + for (auto& t : tables) { + benchmark::DoNotOptimize(t); + t.reserve(reserve_size); + benchmark::DoNotOptimize(t); + } + } +} +BENCHMARK(BM_ReserveStringTable)->Range(1, 64); + +// Like std::iota, except that ctrl_t doesn't support operator++. 
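+//
+// In the benchmarks below the sequence starts at a small negative value, so a
+// few leading bytes decode as special control values (-2 is ctrl_t::kDeleted,
+// -1 is ctrl_t::kSentinel) while 0 and up are ordinary full (H2) bytes.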
+template +void Iota(CtrlIter begin, CtrlIter end, int value) { + for (; begin != end; ++begin, ++value) { + *begin = static_cast(value); + } +} + +void BM_Group_Match(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -4); + Group g{group.data()}; + h2_t h = 1; + for (auto _ : state) { + ::benchmark::DoNotOptimize(h); + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.Match(h)); + } +} +BENCHMARK(BM_Group_Match); + +void BM_Group_MaskEmpty(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -4); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MaskEmpty()); + } +} +BENCHMARK(BM_Group_MaskEmpty); + +void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -4); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted()); + } +} +BENCHMARK(BM_Group_MaskEmptyOrDeleted); + +void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -2); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted()); + } +} +BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted); + +void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -2); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet()); + } +} +BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted); + +void BM_DropDeletes(benchmark::State& state) { + constexpr size_t capacity = (1 << 20) - 1; + std::vector ctrl(capacity + 1 + Group::kWidth); + ctrl[capacity] = ctrl_t::kSentinel; + std::vector pattern = {ctrl_t::kEmpty, static_cast(2), + ctrl_t::kDeleted, static_cast(2), + ctrl_t::kEmpty, static_cast(1), + ctrl_t::kDeleted}; + for (size_t i = 0; i != capacity; ++i) { + ctrl[i] = pattern[i % pattern.size()]; + } + while (state.KeepRunning()) { + state.PauseTiming(); + std::vector ctrl_copy = ctrl; + state.ResumeTiming(); + ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity); + ::benchmark::DoNotOptimize(ctrl_copy[capacity]); + } +} +BENCHMARK(BM_DropDeletes); + +void BM_Resize(benchmark::State& state) { + // For now just measure a small cheap hash table since we + // are mostly interested in the overhead of type-erasure + // in resize(). + constexpr int kElements = 64; + const int kCapacity = kElements * 2; + + IntTable table; + for (int i = 0; i < kElements; i++) { + table.insert(i); + } + for (auto unused : state) { + table.rehash(0); + table.rehash(kCapacity); + } +} +BENCHMARK(BM_Resize); + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +// These methods are here to make it easy to examine the assembly for targeted +// parts of the API. +auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table, + int64_t key) -> decltype(table->find(key)) { + return table->find(key); +} + +bool CodegenAbslRawHashSetInt64FindNeEnd( + absl::container_internal::IntTable* table, int64_t key) { + return table->find(key) != table->end(); +} + +// This is useful because the find isn't inlined but the iterator comparison is. 
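+// One way to eyeball the generated code (an example workflow, not wired into
+// the build) is to compile this file with optimizations and disassemble the
+// Codegen* symbols, e.g.:
+//   objdump --disassemble --demangle raw_hash_set_benchmark | grep -A40 Codegen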
+bool CodegenAbslRawHashSetStringFindNeEnd( + absl::container_internal::StringTable* table, const std::string& key) { + return table->find(key) != table->end(); +} + +auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table, + int64_t key) + -> decltype(table->insert(key)) { + return table->insert(key); +} + +bool CodegenAbslRawHashSetInt64Contains( + absl::container_internal::IntTable* table, int64_t key) { + return table->contains(key); +} + +void CodegenAbslRawHashSetInt64Iterate( + absl::container_internal::IntTable* table) { + for (auto x : *table) benchmark::DoNotOptimize(x); +} + +int odr = + (::benchmark::DoNotOptimize(std::make_tuple( + &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd, + &CodegenAbslRawHashSetStringFindNeEnd, + &CodegenAbslRawHashSetInt64Insert, &CodegenAbslRawHashSetInt64Contains, + &CodegenAbslRawHashSetInt64Iterate)), + 1); diff --git a/third_party/absl/absl/container/internal/raw_hash_set_probe_benchmark.cc b/third_party/absl/absl/container/internal/raw_hash_set_probe_benchmark.cc new file mode 100644 index 000000000..5d4184b26 --- /dev/null +++ b/third_party/absl/absl/container/internal/raw_hash_set_probe_benchmark.cc @@ -0,0 +1,592 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates probe length statistics for many combinations of key types and key +// distributions, all using the default hash function for swisstable. + +#include +#include // NOLINT +#include + +#include "absl/base/no_destructor.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/hashtable_debug.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" +#include "absl/strings/strip.h" +#include "absl/types/optional.h" + +namespace { + +enum class OutputStyle { kRegular, kBenchmark }; + +// The --benchmark command line flag. +// This is populated from main(). +// When run in "benchmark" mode, we have different output. This allows +// A/B comparisons with tools like `benchy`. +absl::string_view benchmarks; + +OutputStyle output() { + return !benchmarks.empty() ? 
OutputStyle::kBenchmark : OutputStyle::kRegular; +} + +template +struct Policy { + using slot_type = T; + using key_type = T; + using init_type = T; + + template + static void construct(allocator_type* alloc, slot_type* slot, + const Arg& arg) { + std::allocator_traits::construct(*alloc, slot, arg); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + static slot_type& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, const slot_type& arg) + -> decltype(std::forward(f)(arg, arg)) { + return std::forward(f)(arg, arg); + } +}; + +absl::BitGen& GlobalBitGen() { + static absl::NoDestructor value; + return *value; +} + +// Keeps a pool of allocations and randomly gives one out. +// This introduces more randomization to the addresses given to swisstable and +// should help smooth out this factor from probe length calculation. +template +class RandomizedAllocator { + public: + using value_type = T; + + RandomizedAllocator() = default; + template + RandomizedAllocator(RandomizedAllocator) {} // NOLINT + + static T* allocate(size_t n) { + auto& pointers = GetPointers(n); + // Fill the pool + while (pointers.size() < kRandomPool) { + pointers.push_back(std::allocator{}.allocate(n)); + } + + // Choose a random one. + size_t i = absl::Uniform(GlobalBitGen(), 0, pointers.size()); + T* result = pointers[i]; + pointers[i] = pointers.back(); + pointers.pop_back(); + return result; + } + + static void deallocate(T* p, size_t n) { + // Just put it back on the pool. No need to release the memory. + GetPointers(n).push_back(p); + } + + private: + // We keep at least kRandomPool allocations for each size. + static constexpr size_t kRandomPool = 20; + + static std::vector& GetPointers(size_t n) { + static absl::NoDestructor>> m; + return (*m)[n]; + } +}; + +template +struct DefaultHash { + using type = absl::container_internal::hash_default_hash; +}; + +template +using DefaultHashT = typename DefaultHash::type; + +template +struct Table : absl::container_internal::raw_hash_set< + Policy, DefaultHashT, + absl::container_internal::hash_default_eq, + RandomizedAllocator> {}; + +struct LoadSizes { + size_t min_load; + size_t max_load; +}; + +LoadSizes GetMinMaxLoadSizes() { + static const auto sizes = [] { + Table t; + + // First, fill enough to have a good distribution. + constexpr size_t kMinSize = 10000; + while (t.size() < kMinSize) t.insert(t.size()); + + const auto reach_min_load_factor = [&] { + const double lf = t.load_factor(); + while (lf <= t.load_factor()) t.insert(t.size()); + }; + + // Then, insert until we reach min load factor. + reach_min_load_factor(); + const size_t min_load_size = t.size(); + + // Keep going until we hit min load factor again, then go back one. + t.insert(t.size()); + reach_min_load_factor(); + + return LoadSizes{min_load_size, t.size() - 1}; + }(); + return sizes; +} + +struct Ratios { + double min_load; + double avg_load; + double max_load; +}; + +// See absl/container/internal/hashtable_debug.h for details on +// probe length calculation. 
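+//
+// CollectMeanProbeLengths() below fills a table with keys drawn from ElemFn
+// and records the mean probe length at three fill levels: the minimum
+// steady-state load, the midpoint, and the maximum load reached just before
+// the table would grow again.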
+template +Ratios CollectMeanProbeLengths() { + const auto min_max_sizes = GetMinMaxLoadSizes(); + + ElemFn elem; + using Key = decltype(elem()); + Table t; + + Ratios result; + while (t.size() < min_max_sizes.min_load) t.insert(elem()); + result.min_load = + absl::container_internal::GetHashtableDebugProbeSummary(t).mean; + + while (t.size() < (min_max_sizes.min_load + min_max_sizes.max_load) / 2) + t.insert(elem()); + result.avg_load = + absl::container_internal::GetHashtableDebugProbeSummary(t).mean; + + while (t.size() < min_max_sizes.max_load) t.insert(elem()); + result.max_load = + absl::container_internal::GetHashtableDebugProbeSummary(t).mean; + + return result; +} + +template +uintptr_t PointerForAlignment() { + alignas(Align) static constexpr uintptr_t kInitPointer = 0; + return reinterpret_cast(&kInitPointer); +} + +// This incomplete type is used for testing hash of pointers of different +// alignments. +// NOTE: We are generating invalid pointer values on the fly with +// reinterpret_cast. There are not "safely derived" pointers so using them is +// technically UB. It is unlikely to be a problem, though. +template +struct Ptr; + +template +Ptr* MakePtr(uintptr_t v) { + if (sizeof(v) == 8) { + constexpr int kCopyBits = 16; + // Ensure high bits are all the same. + v = static_cast(static_cast(v << kCopyBits) >> + kCopyBits); + } + return reinterpret_cast*>(v); +} + +struct IntIdentity { + uint64_t i; + friend bool operator==(IntIdentity a, IntIdentity b) { return a.i == b.i; } + IntIdentity operator++(int) { return IntIdentity{i++}; } +}; + +template +struct PtrIdentity { + explicit PtrIdentity(uintptr_t val = PointerForAlignment()) : i(val) {} + uintptr_t i; + friend bool operator==(PtrIdentity a, PtrIdentity b) { return a.i == b.i; } + PtrIdentity operator++(int) { + PtrIdentity p(i); + i += Align; + return p; + } +}; + +constexpr char kStringFormat[] = "/path/to/file/name-%07d-of-9999999.txt"; + +template +struct String { + std::string value; + static std::string Make(uint32_t v) { + return {small ? 
absl::StrCat(v) : absl::StrFormat(kStringFormat, v)}; + } +}; + +template <> +struct DefaultHash { + struct type { + size_t operator()(IntIdentity t) const { return t.i; } + }; +}; + +template +struct DefaultHash> { + struct type { + size_t operator()(PtrIdentity t) const { return t.i; } + }; +}; + +template +struct Sequential { + T operator()() const { return current++; } + mutable T current{}; +}; + +template +struct Sequential*> { + Ptr* operator()() const { + auto* result = MakePtr(current); + current += Align; + return result; + } + mutable uintptr_t current = PointerForAlignment(); +}; + + +template +struct Sequential> { + std::string operator()() const { return String::Make(current++); } + mutable uint32_t current = 0; +}; + +template +struct Sequential> { + mutable Sequential tseq; + mutable Sequential useq; + + using RealT = decltype(tseq()); + using RealU = decltype(useq()); + + mutable std::vector ts; + mutable std::vector us; + mutable size_t ti = 0, ui = 0; + + std::pair operator()() const { + std::pair value{get_t(), get_u()}; + if (ti == 0) { + ti = ui + 1; + ui = 0; + } else { + --ti; + ++ui; + } + return value; + } + + RealT get_t() const { + while (ti >= ts.size()) ts.push_back(tseq()); + return ts[ti]; + } + + RealU get_u() const { + while (ui >= us.size()) us.push_back(useq()); + return us[ui]; + } +}; + +template +struct AlmostSequential { + mutable Sequential current; + + auto operator()() const -> decltype(current()) { + while (absl::Uniform(GlobalBitGen(), 0.0, 1.0) <= percent_skip / 100.) + current(); + return current(); + } +}; + +struct Uniform { + template + T operator()(T) const { + return absl::Uniform(absl::IntervalClosed, GlobalBitGen(), T{0}, ~T{0}); + } +}; + +struct Gaussian { + template + T operator()(T) const { + double d; + do { + d = absl::Gaussian(GlobalBitGen(), 1e6, 1e4); + } while (d <= 0 || d > std::numeric_limits::max() / 2); + return static_cast(d); + } +}; + +struct Zipf { + template + T operator()(T) const { + return absl::Zipf(GlobalBitGen(), std::numeric_limits::max(), 1.6); + } +}; + +template +struct Random { + T operator()() const { return Dist{}(T{}); } +}; + +template +struct Random*, Dist> { + Ptr* operator()() const { + return MakePtr(Random{}() * Align); + } +}; + +template +struct Random { + IntIdentity operator()() const { + return IntIdentity{Random{}()}; + } +}; + +template +struct Random, Dist> { + PtrIdentity operator()() const { + return PtrIdentity{Random{}() * Align}; + } +}; + +template +struct Random, Dist> { + std::string operator()() const { + return String::Make(Random{}()); + } +}; + +template +struct Random, Dist> { + auto operator()() const + -> decltype(std::make_pair(Random{}(), Random{}())) { + return std::make_pair(Random{}(), Random{}()); + } +}; + +template +std::string Name(); + +std::string Name(uint32_t*) { return "u32"; } +std::string Name(uint64_t*) { return "u64"; } +std::string Name(IntIdentity*) { return "IntIdentity"; } + +template +std::string Name(Ptr**) { + return absl::StrCat("Ptr", Align); +} + +template +std::string Name(PtrIdentity*) { + return absl::StrCat("PtrIdentity", Align); +} + +template +std::string Name(String*) { + return small ? 
"StrS" : "StrL"; +} + +template +std::string Name(std::pair*) { + if (output() == OutputStyle::kBenchmark) + return absl::StrCat("P_", Name(), "_", Name()); + return absl::StrCat("P<", Name(), ",", Name(), ">"); +} + +template +std::string Name(Sequential*) { + return "Sequential"; +} + +template +std::string Name(AlmostSequential*) { + return absl::StrCat("AlmostSeq_", P); +} + +template +std::string Name(Random*) { + return "UnifRand"; +} + +template +std::string Name(Random*) { + return "GausRand"; +} + +template +std::string Name(Random*) { + return "ZipfRand"; +} + +template +std::string Name() { + return Name(static_cast(nullptr)); +} + +constexpr int kNameWidth = 15; +constexpr int kDistWidth = 16; + +bool CanRunBenchmark(absl::string_view name) { + static const absl::NoDestructor> filter([] { + return benchmarks.empty() || benchmarks == "all" + ? absl::nullopt + : absl::make_optional(std::regex(std::string(benchmarks))); + }()); + return !filter->has_value() || std::regex_search(std::string(name), **filter); +} + +struct Result { + std::string name; + std::string dist_name; + Ratios ratios; +}; + +template +void RunForTypeAndDistribution(std::vector& results) { + std::string name = absl::StrCat(Name(), "/", Name()); + // We have to check against all three names (min/avg/max) before we run it. + // If any of them is enabled, we run it. + if (!CanRunBenchmark(absl::StrCat(name, "/min")) && + !CanRunBenchmark(absl::StrCat(name, "/avg")) && + !CanRunBenchmark(absl::StrCat(name, "/max"))) { + return; + } + results.push_back({Name(), Name(), CollectMeanProbeLengths()}); +} + +template +void RunForType(std::vector& results) { + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); +#ifdef NDEBUG + // Disable these in non-opt mode because they take too long. + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); +#endif // NDEBUG +} + +} // namespace + +int main(int argc, char** argv) { + // Parse the benchmark flags. Ignore all of them except the regex pattern. + for (int i = 1; i < argc; ++i) { + absl::string_view arg = argv[i]; + const auto next = [&] { return argv[std::min(i + 1, argc - 1)]; }; + + if (absl::ConsumePrefix(&arg, "--benchmark_filter")) { + if (arg == "") { + // --benchmark_filter X + benchmarks = next(); + } else if (absl::ConsumePrefix(&arg, "=")) { + // --benchmark_filter=X + benchmarks = arg; + } + } + + // Any --benchmark flag turns on the mode. 
+ if (absl::ConsumePrefix(&arg, "--benchmark")) { + if (benchmarks.empty()) benchmarks="all"; + } + } + + std::vector results; + RunForType(results); + RunForType(results); + RunForType*>(results); + RunForType*>(results); + RunForType*>(results); + RunForType*>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>>(results); + RunForType, uint64_t>>(results); + RunForType>>(results); + RunForType, uint64_t>>(results); + + switch (output()) { + case OutputStyle::kRegular: + absl::PrintF("%-*s%-*s Min Avg Max\n%s\n", kNameWidth, + "Type", kDistWidth, "Distribution", + std::string(kNameWidth + kDistWidth + 10 * 3, '-')); + for (const auto& result : results) { + absl::PrintF("%-*s%-*s %8.4f %8.4f %8.4f\n", kNameWidth, result.name, + kDistWidth, result.dist_name, result.ratios.min_load, + result.ratios.avg_load, result.ratios.max_load); + } + break; + case OutputStyle::kBenchmark: { + absl::PrintF("{\n"); + absl::PrintF(" \"benchmarks\": [\n"); + absl::string_view comma; + for (const auto& result : results) { + auto print = [&](absl::string_view stat, double Ratios::*val) { + std::string name = + absl::StrCat(result.name, "/", result.dist_name, "/", stat); + // Check the regex again. We might had have enabled only one of the + // stats for the benchmark. + if (!CanRunBenchmark(name)) return; + absl::PrintF(" %s{\n", comma); + absl::PrintF(" \"cpu_time\": %f,\n", 1e9 * result.ratios.*val); + absl::PrintF(" \"real_time\": %f,\n", 1e9 * result.ratios.*val); + absl::PrintF(" \"iterations\": 1,\n"); + absl::PrintF(" \"name\": \"%s\",\n", name); + absl::PrintF(" \"time_unit\": \"ns\"\n"); + absl::PrintF(" }\n"); + comma = ","; + }; + print("min", &Ratios::min_load); + print("avg", &Ratios::avg_load); + print("max", &Ratios::max_load); + } + absl::PrintF(" ],\n"); + absl::PrintF(" \"context\": {\n"); + absl::PrintF(" }\n"); + absl::PrintF("}\n"); + break; + } + } + + return 0; +} diff --git a/third_party/absl/absl/container/internal/raw_hash_set_test.cc b/third_party/absl/absl/container/internal/raw_hash_set_test.cc new file mode 100644 index 000000000..f9797f565 --- /dev/null +++ b/third_party/absl/absl/container/internal/raw_hash_set_test.cc @@ -0,0 +1,2684 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
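+
+// Tests for the SwissTable implementation that backs the raw_hash_set family
+// of containers (absl::flat_hash_set, absl::flat_hash_map, and friends).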
+ +#include "absl/container/internal/raw_hash_set.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/cycleclock.h" +#include "absl/base/prefetch.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/container/internal/hashtable_debug.h" +#include "absl/container/internal/hashtablez_sampler.h" +#include "absl/container/internal/test_allocator.h" +#include "absl/hash/hash.h" +#include "absl/log/log.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +struct RawHashSetTestOnlyAccess { + template + static auto GetSlots(const C& c) -> decltype(c.slot_array()) { + return c.slot_array(); + } + template + static size_t CountTombstones(const C& c) { + return c.common().TombstonesCount(); + } +}; + +namespace { + +using ::testing::ElementsAre; +using ::testing::Eq; +using ::testing::Ge; +using ::testing::Lt; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +// Convenience function to static cast to ctrl_t. +ctrl_t CtrlT(int i) { return static_cast(i); } + +TEST(Util, NormalizeCapacity) { + EXPECT_EQ(1, NormalizeCapacity(0)); + EXPECT_EQ(1, NormalizeCapacity(1)); + EXPECT_EQ(3, NormalizeCapacity(2)); + EXPECT_EQ(3, NormalizeCapacity(3)); + EXPECT_EQ(7, NormalizeCapacity(4)); + EXPECT_EQ(7, NormalizeCapacity(7)); + EXPECT_EQ(15, NormalizeCapacity(8)); + EXPECT_EQ(15, NormalizeCapacity(15)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2)); +} + +TEST(Util, GrowthAndCapacity) { + // Verify that GrowthToCapacity gives the minimum capacity that has enough + // growth. + for (size_t growth = 0; growth < 10000; ++growth) { + SCOPED_TRACE(growth); + size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth)); + // The capacity is large enough for `growth`. + EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth)); + // For (capacity+1) < kWidth, growth should equal capacity. + if (capacity + 1 < Group::kWidth) { + EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity)); + } else { + EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity)); + } + if (growth != 0 && capacity > 1) { + // There is no smaller capacity that works. 
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)); + } + } + + for (size_t capacity = Group::kWidth - 1; capacity < 10000; + capacity = 2 * capacity + 1) { + SCOPED_TRACE(capacity); + size_t growth = CapacityToGrowth(capacity); + EXPECT_THAT(growth, Lt(capacity)); + EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity); + EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity); + } +} + +TEST(Util, probe_seq) { + probe_seq<16> seq(0, 127); + auto gen = [&]() { + size_t res = seq.offset(); + seq.next(); + return res; + }; + std::vector offsets(8); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); + seq = probe_seq<16>(128, 127); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); +} + +TEST(BitMask, Smoke) { + EXPECT_FALSE((BitMask(0))); + EXPECT_TRUE((BitMask(5))); + + EXPECT_THAT((BitMask(0)), ElementsAre()); + EXPECT_THAT((BitMask(0x1)), ElementsAre(0)); + EXPECT_THAT((BitMask(0x2)), ElementsAre(1)); + EXPECT_THAT((BitMask(0x3)), ElementsAre(0, 1)); + EXPECT_THAT((BitMask(0x4)), ElementsAre(2)); + EXPECT_THAT((BitMask(0x5)), ElementsAre(0, 2)); + EXPECT_THAT((BitMask(0x55)), ElementsAre(0, 2, 4, 6)); + EXPECT_THAT((BitMask(0xAA)), ElementsAre(1, 3, 5, 7)); +} + +TEST(BitMask, WithShift) { + // See the non-SSE version of Group for details on what this math is for. + uint64_t ctrl = 0x1716151413121110; + uint64_t hash = 0x12; + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + uint64_t mask = (x - lsbs) & ~x & msbs; + EXPECT_EQ(0x0000000080800000, mask); + + BitMask b(mask); + EXPECT_EQ(*b, 2); +} + +TEST(BitMask, LeadingTrailing) { + EXPECT_EQ((BitMask(0x00001a40).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x00001a40).TrailingZeros()), 6); + + EXPECT_EQ((BitMask(0x00000001).LeadingZeros()), 15); + EXPECT_EQ((BitMask(0x00000001).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x00008000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x00008000).TrailingZeros()), 15); + + EXPECT_EQ((BitMask(0x0000008080808000).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x0000008080808000).TrailingZeros()), 1); + + EXPECT_EQ((BitMask(0x0000000000000080).LeadingZeros()), 7); + EXPECT_EQ((BitMask(0x0000000000000080).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x8000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x8000000000000000).TrailingZeros()), 7); +} + +TEST(Group, EmptyGroup) { + for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h)); +} + +TEST(Group, Match) { + if (Group::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); + EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); + EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); + EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); + EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); + } else { + FAIL() << "No 
test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MaskEmpty) { + if (Group::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MaskFull) { + if (Group::kWidth == 16) { + ctrl_t group[] = { + ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1), + CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)}; + EXPECT_THAT(Group{group}.MaskFull(), + ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, + ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel, + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MaskEmptyOrDeleted) { + if (Group::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3), + ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); + EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Batch, DropDeletes) { + constexpr size_t kCapacity = 63; + constexpr size_t kGroupWidth = container_internal::Group::kWidth; + std::vector ctrl(kCapacity + 1 + kGroupWidth); + ctrl[kCapacity] = ctrl_t::kSentinel; + std::vector pattern = { + ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2), + ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted}; + for (size_t i = 0; i != kCapacity; ++i) { + ctrl[i] = pattern[i % pattern.size()]; + if (i < kGroupWidth - 1) + ctrl[i + kCapacity + 1] = pattern[i % pattern.size()]; + } + ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity); + ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel); + for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) { + ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()]; + if (i == kCapacity) expected = ctrl_t::kSentinel; + if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty; + if (IsFull(expected)) expected = ctrl_t::kDeleted; + EXPECT_EQ(ctrl[i], expected) + << i << " " << static_cast(pattern[i % pattern.size()]); + } +} + +TEST(Group, CountLeadingEmptyOrDeleted) { + const std::vector empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted}; + const std::vector full_examples = { + CtrlT(0), CtrlT(1), CtrlT(2), 
CtrlT(3), + CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel}; + + for (ctrl_t empty : empty_examples) { + std::vector e(Group::kWidth, empty); + EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted()); + for (ctrl_t full : full_examples) { + for (size_t i = 0; i != Group::kWidth; ++i) { + std::vector f(Group::kWidth, empty); + f[i] = full; + EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + std::vector f(Group::kWidth, empty); + f[Group::kWidth * 2 / 3] = full; + f[Group::kWidth / 2] = full; + EXPECT_EQ(Group::kWidth / 2, + Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + } +} + +template +struct ValuePolicy { + using slot_type = T; + using key_type = T; + using init_type = T; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static std::integral_constant transfer( + Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + return {}; + } + + static T& element(slot_type* slot) { return *slot; } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)...); + } +}; + +using IntPolicy = ValuePolicy; +using Uint8Policy = ValuePolicy; + +using TranferableIntPolicy = ValuePolicy; + +class StringPolicy { + template ::value>::type> + decltype(std::declval()( + std::declval(), std::piecewise_construct, + std::declval>(), + std::declval())) static apply_impl(F&& f, + std::pair, V> p) { + const absl::string_view& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); + } + + public: + struct slot_type { + struct ctor {}; + + template + explicit slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} + + std::pair pair; + }; + + using key_type = std::string; + using init_type = std::pair; + + template + static void construct(allocator_type* alloc, slot_type* slot, Args... args) { + std::allocator_traits::construct( + *alloc, slot, typename slot_type::ctor(), std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(old_slot->pair)); + destroy(alloc, old_slot); + } + + static std::pair& element(slot_type* slot) { + return slot->pair; + } + + template + static auto apply(F&& f, Args&&... 
args) + -> decltype(apply_impl(std::forward(f), + PairArgs(std::forward(args)...))) { + return apply_impl(std::forward(f), + PairArgs(std::forward(args)...)); + } +}; + +struct StringHash : absl::Hash { + using is_transparent = void; +}; +struct StringEq : std::equal_to { + using is_transparent = void; +}; + +struct StringTable + : raw_hash_set> { + using Base = typename StringTable::raw_hash_set; + StringTable() = default; + using Base::Base; +}; + +template +struct ValueTable + : raw_hash_set, hash_default_hash, + std::equal_to, std::allocator> { + using Base = typename ValueTable::raw_hash_set; + using Base::Base; +}; + +using IntTable = ValueTable; +using Uint8Table = ValueTable; + +using TransferableIntTable = ValueTable; + +template +struct CustomAlloc : std::allocator { + CustomAlloc() = default; + + template + explicit CustomAlloc(const CustomAlloc& /*other*/) {} + + template + struct rebind { + using other = CustomAlloc; + }; +}; + +struct CustomAllocIntTable + : raw_hash_set, + std::equal_to, CustomAlloc> { + using Base = typename CustomAllocIntTable::raw_hash_set; + using Base::Base; +}; + +struct MinimumAlignmentUint8Table + : raw_hash_set, + std::equal_to, MinimumAlignmentAlloc> { + using Base = typename MinimumAlignmentUint8Table::raw_hash_set; + using Base::Base; +}; + +// Allows for freezing the allocator to expect no further allocations. +template +struct FreezableAlloc : std::allocator { + explicit FreezableAlloc(bool* f) : frozen(f) {} + + template + explicit FreezableAlloc(const FreezableAlloc& other) + : frozen(other.frozen) {} + + template + struct rebind { + using other = FreezableAlloc; + }; + + T* allocate(size_t n) { + EXPECT_FALSE(*frozen); + return std::allocator::allocate(n); + } + + bool* frozen; +}; + +struct BadFastHash { + template + size_t operator()(const T&) const { + return 0; + } +}; + +struct BadHashFreezableIntTable + : raw_hash_set, + FreezableAlloc> { + using Base = typename BadHashFreezableIntTable::raw_hash_set; + using Base::Base; +}; + +struct BadTable : raw_hash_set, + std::allocator> { + using Base = typename BadTable::raw_hash_set; + BadTable() = default; + using Base::Base; +}; + +TEST(Table, EmptyFunctorOptimization) { + static_assert(std::is_empty>::value, ""); + static_assert(std::is_empty>::value, ""); + + struct MockTable { + void* ctrl; + void* slots; + size_t size; + size_t capacity; + }; + struct StatelessHash { + size_t operator()(absl::string_view) const { return 0; } + }; + struct StatefulHash : StatelessHash { + size_t dummy; + }; + + struct GenerationData { + size_t reserved_growth; + size_t reservation_size; + GenerationType* generation; + }; + +// Ignore unreachable-code warning. Compiler thinks one branch of each ternary +// conditional is unreachable. +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" +#endif + constexpr size_t mock_size = sizeof(MockTable); + constexpr size_t generation_size = + SwisstableGenerationsEnabled() ? 
sizeof(GenerationData) : 0; +#if defined(__clang__) +#pragma clang diagnostic pop +#endif + + EXPECT_EQ( + mock_size + generation_size, + sizeof( + raw_hash_set, std::allocator>)); + + EXPECT_EQ( + mock_size + sizeof(StatefulHash) + generation_size, + sizeof( + raw_hash_set, std::allocator>)); +} + +TEST(Table, Empty) { + IntTable t; + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.empty()); +} + +TEST(Table, LookupEmpty) { + IntTable t; + auto it = t.find(0); + EXPECT_TRUE(it == t.end()); +} + +TEST(Table, Insert1) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_THAT(*t.find(0), 0); +} + +TEST(Table, Insert2) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_TRUE(t.find(1) == t.end()); + res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(2, t.size()); + EXPECT_THAT(*t.find(0), 0); + EXPECT_THAT(*t.find(1), 1); +} + +TEST(Table, InsertCollision) { + BadTable t; + EXPECT_TRUE(t.find(1) == t.end()); + auto res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(1, t.size()); + + EXPECT_TRUE(t.find(2) == t.end()); + res = t.emplace(2); + EXPECT_THAT(*res.first, 2); + EXPECT_TRUE(res.second); + EXPECT_EQ(2, t.size()); + + EXPECT_THAT(*t.find(1), 1); + EXPECT_THAT(*t.find(2), 2); +} + +// Test that we do not add existent element in case we need to search through +// many groups with deleted elements +TEST(Table, InsertCollisionAndFindAfterDelete) { + BadTable t; // all elements go to the same group. + // Have at least 2 groups with Group::kWidth collisions + // plus some extra collisions in the last group. + constexpr size_t kNumInserts = Group::kWidth * 2 + 5; + for (size_t i = 0; i < kNumInserts; ++i) { + auto res = t.emplace(i); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, i); + EXPECT_EQ(i + 1, t.size()); + } + + // Remove elements one by one and check + // that we still can find all other elements. + for (size_t i = 0; i < kNumInserts; ++i) { + EXPECT_EQ(1, t.erase(i)) << i; + for (size_t j = i + 1; j < kNumInserts; ++j) { + EXPECT_THAT(*t.find(j), j); + auto res = t.emplace(j); + EXPECT_FALSE(res.second) << i << " " << j; + EXPECT_THAT(*res.first, j); + EXPECT_EQ(kNumInserts - i - 1, t.size()); + } + } + EXPECT_TRUE(t.empty()); +} + +TEST(Table, EraseInSmallTables) { + for (int64_t size = 0; size < 64; ++size) { + IntTable t; + for (int64_t i = 0; i < size; ++i) { + t.insert(i); + } + for (int64_t i = 0; i < size; ++i) { + t.erase(i); + EXPECT_EQ(t.size(), size - i - 1); + for (int64_t j = i + 1; j < size; ++j) { + EXPECT_THAT(*t.find(j), j); + } + } + EXPECT_TRUE(t.empty()); + } +} + +TEST(Table, InsertWithinCapacity) { + IntTable t; + t.reserve(10); + const size_t original_capacity = t.capacity(); + const auto addr = [&](int i) { + return reinterpret_cast(&*t.find(i)); + }; + // Inserting an element does not change capacity. + t.insert(0); + EXPECT_THAT(t.capacity(), original_capacity); + const uintptr_t original_addr_0 = addr(0); + // Inserting another element does not rehash. + t.insert(1); + EXPECT_THAT(t.capacity(), original_capacity); + EXPECT_THAT(addr(0), original_addr_0); + // Inserting lots of duplicate elements does not rehash. 
+
+TEST(Table, InsertWithinCapacity) {
+  IntTable t;
+  t.reserve(10);
+  const size_t original_capacity = t.capacity();
+  const auto addr = [&](int i) {
+    return reinterpret_cast<uintptr_t>(&*t.find(i));
+  };
+  // Inserting an element does not change capacity.
+  t.insert(0);
+  EXPECT_THAT(t.capacity(), original_capacity);
+  const uintptr_t original_addr_0 = addr(0);
+  // Inserting another element does not rehash.
+  t.insert(1);
+  EXPECT_THAT(t.capacity(), original_capacity);
+  EXPECT_THAT(addr(0), original_addr_0);
+  // Inserting lots of duplicate elements does not rehash.
+  for (int i = 0; i < 100; ++i) {
+    t.insert(i % 10);
+  }
+  EXPECT_THAT(t.capacity(), original_capacity);
+  EXPECT_THAT(addr(0), original_addr_0);
+  // Inserting a range of duplicate elements does not rehash.
+  std::vector<int64_t> dup_range;
+  for (int i = 0; i < 100; ++i) {
+    dup_range.push_back(i % 10);
+  }
+  t.insert(dup_range.begin(), dup_range.end());
+  EXPECT_THAT(t.capacity(), original_capacity);
+  EXPECT_THAT(addr(0), original_addr_0);
+}
+
+template <typename T>
+class SmallTableResizeTest : public testing::Test {};
+
+TYPED_TEST_SUITE_P(SmallTableResizeTest);
+
+TYPED_TEST_P(SmallTableResizeTest, InsertIntoSmallTable) {
+  TypeParam t;
+  for (int i = 0; i < 32; ++i) {
+    t.insert(i);
+    ASSERT_EQ(t.size(), i + 1);
+    for (int j = 0; j < i + 1; ++j) {
+      EXPECT_TRUE(t.find(j) != t.end());
+      EXPECT_EQ(*t.find(j), j);
+    }
+  }
+}
+
+TYPED_TEST_P(SmallTableResizeTest, ResizeGrowSmallTables) {
+  TypeParam t;
+  for (size_t source_size = 0; source_size < 32; ++source_size) {
+    for (size_t target_size = source_size; target_size < 32; ++target_size) {
+      for (bool rehash : {false, true}) {
+        for (size_t i = 0; i < source_size; ++i) {
+          t.insert(static_cast<int>(i));
+        }
+        if (rehash) {
+          t.rehash(target_size);
+        } else {
+          t.reserve(target_size);
+        }
+        for (size_t i = 0; i < source_size; ++i) {
+          EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+          EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
+        }
+      }
+    }
+  }
+}
+
+TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
+  TypeParam t;
+  for (size_t source_size = 0; source_size < 32; ++source_size) {
+    for (size_t target_size = 0; target_size <= source_size; ++target_size) {
+      size_t inserted_count = std::min<size_t>(source_size, 5);
+      for (size_t i = 0; i < inserted_count; ++i) {
+        t.insert(static_cast<int>(i));
+      }
+      t.rehash(target_size);
+      for (size_t i = 0; i < inserted_count; ++i) {
+        EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+        EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
+      }
+    }
+  }
+}
+
+REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
+                            ResizeGrowSmallTables, ResizeReduceSmallTables);
+using SmallTableTypes =
+    ::testing::Types<IntTable, TransferableIntTable, Uint8Table>;
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
+                               SmallTableResizeTest, SmallTableTypes);
+
+TEST(Table, LazyEmplace) {
+  StringTable t;
+  bool called = false;
+  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+    called = true;
+    f("abc", "ABC");
+  });
+  EXPECT_TRUE(called);
+  EXPECT_THAT(*it, Pair("abc", "ABC"));
+  called = false;
+  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+    called = true;
+    f("abc", "DEF");
+  });
+  EXPECT_FALSE(called);
+  EXPECT_THAT(*it, Pair("abc", "ABC"));
+}
+
+TEST(Table, ContainsEmpty) {
+  IntTable t;
+
+  EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains1) {
+  IntTable t;
+
+  EXPECT_TRUE(t.insert(0).second);
+  EXPECT_TRUE(t.contains(0));
+  EXPECT_FALSE(t.contains(1));
+
+  EXPECT_EQ(1, t.erase(0));
+  EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains2) {
+  IntTable t;
+
+  EXPECT_TRUE(t.insert(0).second);
+  EXPECT_TRUE(t.contains(0));
+  EXPECT_FALSE(t.contains(1));
+
+  t.clear();
+  EXPECT_FALSE(t.contains(0));
+}
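+
+// Illustrative sketch, not part of the Abseil sources: lazy_emplace is a
+// find-or-construct primitive -- the constructor callback runs only on a
+// miss, as the LazyEmplace test above verifies. A cache-style usage sketch
+// (ComputeValue is hypothetical):
+//
+//   StringTable cache;
+//   auto it = cache.lazy_emplace("key", [](const StringTable::constructor& c) {
+//     c("key", ComputeValue());  // executed only if "key" was absent
+//   });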
+
+int decompose_constructed;
+int decompose_copy_constructed;
+int decompose_copy_assigned;
+int decompose_move_constructed;
+int decompose_move_assigned;
+struct DecomposeType {
+  DecomposeType(int i = 0) : i(i) {  // NOLINT
+    ++decompose_constructed;
+  }
+
+  explicit DecomposeType(const char* d) : DecomposeType(*d) {}
+
+  DecomposeType(const DecomposeType& other) : i(other.i) {
+    ++decompose_copy_constructed;
+  }
+  DecomposeType& operator=(const DecomposeType& other) {
+    ++decompose_copy_assigned;
+    i = other.i;
+    return *this;
+  }
+  DecomposeType(DecomposeType&& other) : i(other.i) {
+    ++decompose_move_constructed;
+  }
+  DecomposeType& operator=(DecomposeType&& other) {
+    ++decompose_move_assigned;
+    i = other.i;
+    return *this;
+  }
+
+  int i;
+};
+
+struct DecomposeHash {
+  using is_transparent = void;
+  size_t operator()(const DecomposeType& a) const { return a.i; }
+  size_t operator()(int a) const { return a; }
+  size_t operator()(const char* a) const { return *a; }
+};
+
+struct DecomposeEq {
+  using is_transparent = void;
+  bool operator()(const DecomposeType& a, const DecomposeType& b) const {
+    return a.i == b.i;
+  }
+  bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
+  bool operator()(const DecomposeType& a, const char* b) const {
+    return a.i == *b;
+  }
+};
+
+struct DecomposePolicy {
+  using slot_type = DecomposeType;
+  using key_type = DecomposeType;
+  using init_type = DecomposeType;
+
+  template <typename T>
+  static void construct(void*, DecomposeType* slot, T&& v) {
+    ::new (slot) DecomposeType(std::forward<T>(v));
+  }
+  static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
+  static DecomposeType& element(slot_type* slot) { return *slot; }
+
+  template <class F, class T>
+  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
+    return std::forward<F>(f)(x, x);
+  }
+};
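+
+// Note, not part of the Abseil sources: DecomposePolicy is a minimal instance
+// of the slot-policy interface raw_hash_set is parameterized on --
+// construct/destroy manage slot lifetime, element() maps a slot to its value,
+// and apply() lets the table "decompose" a caller-supplied argument into a
+// key without materializing a DecomposeType. The counters above measure
+// exactly when such materialization happens.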
+
+template <typename Hash, typename Eq>
+void TestDecompose(bool construct_three) {
+  DecomposeType elem{0};
+  const int one = 1;
+  const char* three_p = "3";
+  const auto& three = three_p;
+  const int elem_vector_count = 256;
+  std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
+  std::iota(elem_vector.begin(), elem_vector.end(), 0);
+
+  using DecomposeSet =
+      raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
+  DecomposeSet set1;
+
+  decompose_constructed = 0;
+  int expected_constructed = 0;
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+  set1.insert(elem);
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+  set1.insert(1);
+  EXPECT_EQ(++expected_constructed, decompose_constructed);
+  set1.emplace("3");
+  EXPECT_EQ(++expected_constructed, decompose_constructed);
+  EXPECT_EQ(expected_constructed, decompose_constructed);
+
+  {  // insert(T&&)
+    set1.insert(1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(const T&)
+    set1.insert(one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(hint, T&&)
+    set1.insert(set1.begin(), 1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // insert(hint, const T&)
+    set1.insert(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace(...)
+    set1.emplace(1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace("3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace(three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  {  // emplace_hint(...)
+    set1.emplace_hint(set1.begin(), 1);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), "3");
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), one);
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+    set1.emplace_hint(set1.begin(), three);
+    expected_constructed += construct_three;
+    EXPECT_EQ(expected_constructed, decompose_constructed);
+  }
+
+  decompose_copy_constructed = 0;
+  decompose_copy_assigned = 0;
+  decompose_move_constructed = 0;
+  decompose_move_assigned = 0;
+  int expected_copy_constructed = 0;
+  int expected_move_constructed = 0;
+  {  // raw_hash_set(first, last) with random-access iterators
+    DecomposeSet set2(elem_vector.begin(), elem_vector.end());
+    // Expect exactly one copy-constructor call for each element if no
+    // rehashing is done.
+    expected_copy_constructed += elem_vector_count;
+    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+    EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
+    EXPECT_EQ(0, decompose_move_assigned);
+    EXPECT_EQ(0, decompose_copy_assigned);
+  }
+
+  {  // raw_hash_set(first, last) with forward iterators
+    std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
+    expected_copy_constructed = decompose_copy_constructed;
+    DecomposeSet set2(elem_list.begin(), elem_list.end());
+    // Expect exactly N elements copied into the set, and between N and 2*N
+    // elements moved internally for all the resizing needed (for a growth
+    // factor of 2).
+    expected_copy_constructed += elem_vector_count;
+    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+    expected_move_constructed += elem_vector_count;
+    EXPECT_LT(expected_move_constructed, decompose_move_constructed);
+    expected_move_constructed += elem_vector_count;
+    EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+    EXPECT_EQ(0, decompose_move_assigned);
+    EXPECT_EQ(0, decompose_copy_assigned);
+    expected_copy_constructed = decompose_copy_constructed;
+    expected_move_constructed = decompose_move_constructed;
+  }
+
+  {  // insert(first, last)
+    DecomposeSet set2;
+    set2.insert(elem_vector.begin(), elem_vector.end());
+    // Expect exactly N elements copied into the set, and at most 2*N elements
+    // moved internally for all the resizing needed (for a growth factor of 2).
+    const int expected_new_elements = elem_vector_count;
+    const int expected_max_element_moves = 2 * elem_vector_count;
+    expected_copy_constructed += expected_new_elements;
+    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+    expected_move_constructed += expected_max_element_moves;
+    EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+    EXPECT_EQ(0, decompose_move_assigned);
+    EXPECT_EQ(0, decompose_copy_assigned);
+    expected_copy_constructed = decompose_copy_constructed;
+    expected_move_constructed = decompose_move_constructed;
+  }
+}
+
+TEST(Table, Decompose) {
+  if (SwisstableGenerationsEnabled()) {
+    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+  }
+
+  TestDecompose<DecomposeHash, DecomposeEq>(false);
+
+  struct TransparentHashIntOverload {
+    size_t operator()(const DecomposeType& a) const { return a.i; }
+    size_t operator()(int a) const { return a; }
+  };
+  struct TransparentEqIntOverload {
+    bool operator()(const DecomposeType& a, const DecomposeType& b) const {
+      return a.i == b.i;
+    }
+    bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
+  };
+  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
+  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
+  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
+}
+
+// Returns the largest m such that a table with m elements has the same number
+// of buckets as a table with n elements.
+size_t MaxDensitySize(size_t n) {
+  IntTable t;
+  t.reserve(n);
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  const size_t c = t.bucket_count();
+  while (c == t.bucket_count()) t.emplace(n++);
+  return t.size() - 1;
+}
+
+struct Modulo1000Hash {
+  size_t operator()(int x) const { return x % 1000; }
+};
+
+struct Modulo1000HashTable
+    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
+                          std::allocator<int>> {};
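+
+// Note, not part of the Abseil sources: MaxDensitySize measures the rehash
+// threshold instead of hard-coding the load-factor policy. SwissTable grows
+// at a 7/8 maximum load factor, so for example a capacity-15 table is
+// expected to accept 13 elements before its bucket count changes -- but the
+// tests below rely only on the measured value:
+//
+//   size_t threshold = MaxDensitySize(10);  // largest size whose
+//                                           // bucket_count matches n == 10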
+
+// Test that a rehash with no resize happens when there are many deleted
+// slots.
+TEST(Table, RehashWithNoResize) {
+  if (SwisstableGenerationsEnabled()) {
+    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+  }
+
+  Modulo1000HashTable t;
+  // Add keys that all collide modulo 1000 so that there are at least
+  // kMinFullGroups groups with Group::kWidth collisions, then fill up to
+  // MaxDensitySize.
+  const size_t kMinFullGroups = 7;
+  std::vector<int> keys;
+  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
+    int k = i * 1000;
+    t.emplace(k);
+    keys.push_back(k);
+  }
+  const size_t capacity = t.capacity();
+
+  // Remove elements from all groups except the first and the last one.
+  // All elements removed from full groups will be marked as ctrl_t::kDeleted.
+  const size_t erase_begin = Group::kWidth / 2;
+  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
+  for (size_t i = erase_begin; i < erase_end; ++i) {
+    EXPECT_EQ(1, t.erase(keys[i])) << i;
+  }
+  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
+
+  auto last_key = keys.back();
+  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
+
+  // Make sure that we have to make a lot of probes for the last key.
+  ASSERT_GT(last_key_num_probes, kMinFullGroups);
+
+  int x = 1;
+  // Insert and erase one element at a time until the in-place rehash happens.
+  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
+    t.emplace(x);
+    ASSERT_EQ(capacity, t.capacity());
+    // All elements should be there.
+    ASSERT_TRUE(t.find(x) != t.end()) << x;
+    for (const auto& k : keys) {
+      ASSERT_TRUE(t.find(k) != t.end()) << k;
+    }
+    t.erase(x);
+    ++x;
+  }
+}
+
+TEST(Table, InsertEraseStressTest) {
+  IntTable t;
+  const size_t kMinElementCount = 250;
+  std::deque<int64_t> keys;
+  size_t i = 0;
+  for (; i < MaxDensitySize(kMinElementCount); ++i) {
+    t.emplace(i);
+    keys.push_back(i);
+  }
+  const size_t kNumIterations = 1000000;
+  for (; i < kNumIterations; ++i) {
+    ASSERT_EQ(1, t.erase(keys.front()));
+    keys.pop_front();
+    t.emplace(i);
+    keys.push_back(i);
+  }
+}
+
+TEST(Table, InsertOverloads) {
+  StringTable t;
+  // These should all trigger the insert(init_type) overload.
+  t.insert({{}, {}});
+  t.insert({"ABC", {}});
+  t.insert({"DEF", "!!!"});
+
+  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
+                                      Pair("DEF", "!!!")));
+}
+
+TEST(Table, LargeTable) {
+  IntTable t;
+  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
+  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+}
+
+// Timeout if copy is quadratic as it was in Rust.
+TEST(Table, EnsureNonQuadraticAsInRust) {
+  static const size_t kLargeSize = 1 << 15;
+
+  IntTable t;
+  for (size_t i = 0; i != kLargeSize; ++i) {
+    t.insert(i);
+  }
+
+  // If this is quadratic, the test will timeout.
+  IntTable t2;
+  for (const auto& entry : t) t2.insert(entry);
+}
+
+TEST(Table, ClearBug) {
+  if (SwisstableGenerationsEnabled()) {
+    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+  }
+
+  IntTable t;
+  constexpr size_t capacity = container_internal::Group::kWidth - 1;
+  constexpr size_t max_size = capacity / 2 + 1;
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
+  t.clear();
+  ASSERT_EQ(capacity, t.capacity());
+  for (size_t i = 0; i < max_size; ++i) {
+    t.insert(i);
+  }
+  ASSERT_EQ(capacity, t.capacity());
+  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
+  // We are checking that original and second are close enough to each other
+  // that they are probably still in the same group. This is not strictly
+  // guaranteed.
+  EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
+            capacity * sizeof(IntTable::value_type));
+}
+
+TEST(Table, Erase) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.erase(res.first);
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, EraseMaintainsValidIterator) {
+  IntTable t;
+  const int kNumElements = 100;
+  for (int i = 0; i < kNumElements; i++) {
+    EXPECT_TRUE(t.emplace(i).second);
+  }
+  EXPECT_EQ(t.size(), kNumElements);
+
+  int num_erase_calls = 0;
+  auto it = t.begin();
+  while (it != t.end()) {
+    t.erase(it++);
+    num_erase_calls++;
+  }
+
+  EXPECT_TRUE(t.empty());
+  EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
+TEST(Table, EraseBeginEnd) {
+  IntTable t;
+  for (int i = 0; i < 10; ++i) t.insert(i);
+  EXPECT_EQ(t.size(), 10);
+  t.erase(t.begin(), t.end());
+  EXPECT_EQ(t.size(), 0);
+}
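+
+// Illustrative sketch, not part of the Abseil sources: erase(iterator)
+// invalidates only the erased iterator, so the post-increment idiom used
+// above is the supported way to erase while iterating (Keep is hypothetical):
+//
+//   for (auto it = t.begin(); it != t.end();) {
+//     if (!Keep(*it)) {
+//       t.erase(it++);  // advance before the slot is invalidated
+//     } else {
+//       ++it;
+//     }
+//   }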
+
+// Collect N bad keys with the following algorithm:
+// 1. Create an empty table and reserve it to 2 * N.
+// 2. Insert N random elements.
+// 3. Take the first Group::kWidth - 1 elements into the bad_keys array.
+// 4. Clear the table without resizing.
+// 5. Go to step 2 while fewer than N keys have been collected.
+std::vector<int64_t> CollectBadMergeKeys(size_t N) {
+  static constexpr int kGroupSize = Group::kWidth - 1;
+
+  auto topk_range = [](size_t b, size_t e,
+                       IntTable* t) -> std::vector<int64_t> {
+    for (size_t i = b; i != e; ++i) {
+      t->emplace(i);
+    }
+    std::vector<int64_t> res;
+    res.reserve(kGroupSize);
+    auto it = t->begin();
+    for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
+      res.push_back(*it);
+    }
+    return res;
+  };
+
+  std::vector<int64_t> bad_keys;
+  bad_keys.reserve(N);
+  IntTable t;
+  t.reserve(N * 2);
+
+  for (size_t b = 0; bad_keys.size() < N; b += N) {
+    auto keys = topk_range(b, b + N, &t);
+    bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
+    t.erase(t.begin(), t.end());
+    EXPECT_TRUE(t.empty());
+  }
+  return bad_keys;
+}
+
+struct ProbeStats {
+  // Number of elements with specific probe length over all tested tables.
+  std::vector<size_t> all_probes_histogram;
+  // Ratios total_probe_length/size for every tested table.
+  std::vector<double> single_table_ratios;
+
+  // Average ratio total_probe_length/size over tables.
+  double AvgRatio() const {
+    return std::accumulate(single_table_ratios.begin(),
+                           single_table_ratios.end(), 0.0) /
+           single_table_ratios.size();
+  }
+
+  // Maximum ratio total_probe_length/size over tables.
+  double MaxRatio() const {
+    return *std::max_element(single_table_ratios.begin(),
+                             single_table_ratios.end());
+  }
+
+  // Percentile ratio total_probe_length/size over tables.
+  double PercentileRatio(double Percentile = 0.95) const {
+    auto r = single_table_ratios;
+    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
+    if (mid != r.end()) {
+      std::nth_element(r.begin(), mid, r.end());
+      return *mid;
+    } else {
+      return MaxRatio();
+    }
+  }
+
+  // Maximum probe length over all elements and all tables.
+  size_t MaxProbe() const { return all_probes_histogram.size(); }
+
+  // Fraction of elements with the specified probe length.
+  std::vector<double> ProbeNormalizedHistogram() const {
+    double total_elements = std::accumulate(all_probes_histogram.begin(),
+                                            all_probes_histogram.end(), 0ull);
+    std::vector<double> res;
+    for (size_t p : all_probes_histogram) {
+      res.push_back(p / total_elements);
+    }
+    return res;
+  }
+
+  size_t PercentileProbe(double Percentile = 0.99) const {
+    size_t idx = 0;
+    for (double p : ProbeNormalizedHistogram()) {
+      if (Percentile > p) {
+        Percentile -= p;
+        ++idx;
+      } else {
+        return idx;
+      }
+    }
+    return idx;
+  }
+
+  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
+    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
+        << ", PercentileRatio:" << s.PercentileRatio()
+        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
+    for (double p : s.ProbeNormalizedHistogram()) {
+      out << p << ",";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
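+
+// Worked example, not part of the Abseil sources: given a normalized probe
+// histogram of {0.90, 0.07, 0.03} for probe lengths 0, 1, and 2,
+// PercentileProbe(0.99) consumes 0.90 and then 0.07 (running total 0.97,
+// still below 0.99) and stops at index 2 -- i.e. 99% of elements are found
+// within at most 2 probes.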
+
+struct ExpectedStats {
+  double avg_ratio;
+  double max_ratio;
+  std::vector<std::pair<double, double>> percentile_ratios;
+  std::vector<std::pair<double, size_t>> percentile_probes;
+
+  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
+    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
+        << ", PercentileRatios: [";
+    for (auto el : s.percentile_ratios) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "], PercentileProbes: [";
+    for (auto el : s.percentile_probes) {
+      out << el.first << ":" << el.second << ", ";
+    }
+    out << "]}";
+
+    return out;
+  }
+};
+
+void VerifyStats(size_t size, const ExpectedStats& exp,
+                 const ProbeStats& stats) {
+  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
+  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
+  for (auto pr : exp.percentile_ratios) {
+    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+
+  for (auto pr : exp.percentile_probes) {
+    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
+        << size << " " << pr.first << " " << stats;
+  }
+}
+
+using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table and reserve it to keys.size() * 2.
+// 2. Insert all keys xored with the seed.
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  const size_t reserve_size = keys.size() * 2;
+
+  ProbeStats stats;
+
+  int64_t seed = 0x71b1a19b907d6e33;
+  while (num_iters--) {
+    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
+    IntTable t1;
+    t1.reserve(reserve_size);
+    for (const auto& key : keys) {
+      t1.emplace(key ^ seed);
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        keys.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
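+
+// Note, not part of the Abseil sources: xoring every key with a fresh
+// per-iteration seed re-randomizes the "bad" keys from CollectBadMergeKeys
+// while preserving their relative structure, so the statistics below average
+// probe-length behavior over many distinct but similarly adversarial key
+// sets.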
+
+ExpectedStats XorSeedExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.05,
+                1.0,
+                {{0.95, 0.5}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      } else {
+        return {0.05,
+                2.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                2.0,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                1.0,
+                {{0.95, 0.05}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
+      }
+  }
+  LOG(FATAL) << "Unknown Group width";
+  return {};
+}
+
+// TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
+TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] =
+        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
+  }
+  auto expected = XorSeedExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+    LOG(INFO) << size << " " << stat;
+  }
+}
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table.
+// 2. Select 10% of the keys and for each one insert the 10 elements
+//    key * 17 + j * 13, j in [0, 10).
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
+    const std::vector<int64_t>& keys, size_t num_iters) {
+  ProbeStats stats;
+
+  std::random_device rd;
+  std::mt19937 rng(rd());
+  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
+  std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
+  while (num_iters--) {
+    IntTable t1;
+    size_t num_keys = keys.size() / 10;
+    size_t start = dist(rng);
+    for (size_t i = 0; i != num_keys; ++i) {
+      for (size_t j = 0; j != 10; ++j) {
+        t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
+      }
+    }
+
+    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+    stats.all_probes_histogram.resize(
+        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+    std::transform(probe_histogram.begin(), probe_histogram.end(),
+                   stats.all_probes_histogram.begin(),
+                   stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+    size_t total_probe_seq_length = 0;
+    for (size_t i = 0; i < probe_histogram.size(); ++i) {
+      total_probe_seq_length += i * probe_histogram[i];
+    }
+    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+                                        t1.size());
+    t1.erase(t1.begin(), t1.end());
+  }
+  return stats;
+}
+
+ExpectedStats LinearTransformExpectedStats() {
+  constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+      false;
+#else   // NDEBUG
+      true;
+#endif  // NDEBUG
+
+  // The effective load factor is larger in non-opt mode because we insert
+  // elements out of order.
+  switch (container_internal::Group::kWidth) {
+    case 8:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.5,
+                {{0.95, 0.3}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+      } else {
+        return {0.4,
+                0.6,
+                {{0.95, 0.5}},
+                {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
+      }
+    case 16:
+      if (kRandomizesInserts) {
+        return {0.1,
+                0.4,
+                {{0.95, 0.3}},
+                {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
+      } else {
+        return {0.05,
+                0.2,
+                {{0.95, 0.1}},
+                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
+      }
+  }
+  LOG(FATAL) << "Unknown Group width";
+  return {};
+}
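+
+// Note, not part of the Abseil sources: the DISABLED_ probe-statistics tests
+// in this file can still be run on demand by passing
+// --gtest_also_run_disabled_tests (optionally narrowed with --gtest_filter)
+// when investigating probe-length regressions.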
+
+// TODO(b/80415403): Figure out why this test is so flaky.
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+        CollectBadMergeKeys(size), 300);
+  }
+  auto expected = LinearTransformExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+    LOG(INFO) << size << " " << stat;
+  }
+}
+
+TEST(Table, EraseCollision) {
+  BadTable t;
+
+  // 1 2 3
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(3, t.size());
+
+  // 1 DELETED 3
+  t.erase(t.find(2));
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(2, t.size());
+
+  // DELETED DELETED 3
+  t.erase(t.find(1));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(1, t.size());
+
+  // DELETED DELETED DELETED
+  t.erase(t.find(3));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_TRUE(t.find(3) == t.end());
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+  BadTable t(100);
+
+  // 1 2 3 4
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  t.emplace(4);
+
+  // 1 DELETED 3 DELETED
+  t.erase(t.find(2));
+  t.erase(t.find(4));
+
+  // 1 10 3 11 12
+  t.emplace(10);
+  t.emplace(11);
+  t.emplace(12);
+
+  EXPECT_EQ(5, t.size());
+  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.clear();
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.clear();
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  IntTable u;
+  t.swap(u);
+  EXPECT_EQ(0, t.size());
+  EXPECT_EQ(1, u.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+  EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.emplace(0);
+  t.emplace(1);
+  EXPECT_EQ(2, t.size());
+  t.rehash(128);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(1);
+  EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+  IntTable t;
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+  IntTable t;
+  t.emplace(0);
+  t.clear();
+  EXPECT_NE(0, t.bucket_count());
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(0);
+  EXPECT_NE(p, &*t.find(0));
+}
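+
+// Illustrative sketch, not part of the Abseil sources: as the three
+// RehashZero tests above pin down, rehash(0) is the shrink-to-fit idiom --
+// it deallocates an empty table's backing array and otherwise shrinks
+// capacity to the minimum that still fits the current size:
+//
+//   IntTable t;
+//   t.emplace(0);
+//   t.clear();
+//   t.rehash(0);  // bucket_count() drops back to 0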
+
+TEST(Table, ConstructFromInitList) {
+  using P = std::pair<std::string, std::string>;
+  struct Q {
+    operator P() const { return {}; }  // NOLINT
+  };
+  StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+  IntTable t;
+  t.emplace(0);
+  EXPECT_EQ(1, t.size());
+  {
+    IntTable u(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u{t};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u = t;
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+    : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, Alloc<int64_t>> {
+  ExplicitAllocIntTable() = default;
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+  ExplicitAllocIntTable t;
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, MoveConstruct) {
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u(std::move(t));
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u{std::move(t)};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u = std::move(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = t;
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  t = *&t;
+  EXPECT_EQ(1, t.size());
+  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = std::move(t);
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveSelfAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  t = std::move(*&t);
+  // As long as we don't crash, it's fine.
+}
+
+TEST(Table, Equality) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+                                                        {"aa", "bb"}};
+  t.insert(std::begin(v), std::end(v));
+  StringTable u = t;
+  EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+                                                         {"aa", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+                                                         {"bb", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+  IntTable t;
+  t.emplace(0);
+  t.erase(t.find(0));
+  // construct over a deleted slot.
+  t.emplace(0);
+  t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+  IntTable t;
+  for (int i = 0; i < 1000; ++i) {
+    t.emplace(i);
+    t.erase(t.find(i));
+  }
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+  size_t n;
+  {
+    // Compute n such that n is the maximum number of elements before rehash.
+    IntTable t;
+    t.emplace(0);
+    size_t c = t.bucket_count();
+    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+    --n;
+  }
+  IntTable t;
+  t.rehash(n);
+  const size_t c = t.bucket_count();
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+  t.erase(0);
+  t.emplace(0);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
+
+TEST(Table, NoThrowMoveConstruct) {
+  ASSERT_TRUE(
+      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<
+              std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+  ASSERT_TRUE(
+      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+  ASSERT_TRUE(
+      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+              std::equal_to<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+  struct Hash {
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const {
+      ADD_FAILURE();
+      return i;
+    }
+  };
+  struct Eq {
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(int64_t a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(double a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+  };
+
+  struct THash {
+    using is_transparent = void;
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const { return i; }
+  };
+  struct TEq {
+    using is_transparent = void;
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const { return a == b; }
+    bool operator()(int64_t a, double b) const { return a == b; }
+    bool operator()(double a, double b) const { return a == b; }
+  };
+
+  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
+  // It will convert to int64_t before the query.
+  EXPECT_EQ(1, *s.find(double{1.1}));
+
+  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
+  // It will try to use the double, and fail to find the object.
+  EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
+template <class Table>
+using CallFind = decltype(std::declval<Table&>().find(17));
+
+template <class Table>
+using CallErase = decltype(std::declval<Table&>().erase(17));
+
+template <class Table>
+using CallExtract = decltype(std::declval<Table&>().extract(17));
+
+template <class Table>
+using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
+
+template <class Table>
+using CallCount = decltype(std::declval<Table&>().count(17));
+
+template