/rules.ninja
*~
.emacs.desktop
+.tags
# Build system generated files #
################################
The list of most significant changes made over time in
Intel(R) Threading Building Blocks (Intel(R) TBB).
+Intel TBB 2019 Update 7
+TBB_INTERFACE_VERSION == 11007
+
+Changes (w.r.t. Intel TBB 2019 Update 6):
+
+- Added TBBMALLOC_SET_HUGE_SIZE_THRESHOLD parameter to set the lower
+ bound for allocations that are not released back to the OS unless
+ a cleanup is explicitly requested (see the sketch after this list).
+- Added zip_iterator::base() method to get the tuple of underlying
+ iterators.
+- Improved async_node to never block a thread that sends a message
+ through its gateway.
+- Extended decrement port of the tbb::flow::limiter_node to accept
+ messages of integral types.
+- Added support for Windows* to the CMake module TBBInstallConfig.
+- Added packaging of CMake configuration files to TBB packages built
+ using the build/build.py script
+ (https://github.com/intel/tbb/issues/141).
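+
+  For illustration, a minimal sketch of the new tbbmalloc knob, assuming the
+  parameter is set through scalable_allocation_mode() like the existing
+  tbbmalloc tuning parameters (the 1 MB threshold is an arbitrary choice):
+
+      #include "tbb/scalable_allocator.h"
+
+      int main() {
+          // Keep freed allocations of 1 MB and larger cached by tbbmalloc
+          // instead of returning them to the OS.
+          scalable_allocation_mode(TBBMALLOC_SET_HUGE_SIZE_THRESHOLD, 1024 * 1024);
+
+          void* p = scalable_malloc(2 * 1024 * 1024);
+          scalable_free(p); // the block stays cached due to the raised threshold
+
+          // An explicit cleanup still releases the cached memory to the OS.
+          scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, 0);
+          return 0;
+      }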
+
+Changes affecting backward compatibility:
+
+- Removed the number_of_decrement_predecessors parameter from the
+ constructor of flow::limiter_node. To allow its usage, set the
+ TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR macro to 1 (see the sketch
+ after this list).
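+
+  A minimal sketch of the extended decrement port, assuming the second
+  template parameter of limiter_node selects the integral decrement
+  message type (graph wiring is omitted):
+
+      #include "tbb/flow_graph.h"
+
+      int main() {
+          tbb::flow::graph g;
+          // The decrement port now accepts an integral message type
+          // instead of only continue_msg.
+          tbb::flow::limiter_node<int, int> limiter(g, /*threshold*/ 4);
+          // A positive value releases that much capacity at once;
+          // a negative value consumes it.
+          limiter.decrement.try_put(2);
+          g.wait_for_all();
+          return 0;
+      }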
+
+Preview Features:
+
+- Added ordered associative containers:
+ concurrent_{map,multimap,set,multiset} (requires C++11).
+
+Open-source contributions integrated:
+
+- Fixed makefiles to properly obtain the GCC version for GCC 7
+ and later (https://github.com/intel/tbb/pull/147) by Timmmm.
+
+------------------------------------------------------------------------
Intel TBB 2019 Update 6
TBB_INTERFACE_VERSION == 11006
-# Threading Building Blocks 2019 Update 6
-[![Stable release](https://img.shields.io/badge/version-2019_U6-green.svg)](https://github.com/01org/tbb/releases/tag/2019_U6)
+# Threading Building Blocks 2019 Update 7
+[![Stable release](https://img.shields.io/badge/version-2019_U7-green.svg)](https://github.com/01org/tbb/releases/tag/2019_U7)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
Threading Building Blocks (TBB) lets you easily write parallel C++ programs that take
endif
ifndef runtime
- gcc_version:=$(shell gcc -dumpversion)
+ gcc_version:=$(shell gcc -dumpfullversion -dumpversion)
os_version:=$(shell uname -r)
os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//')
export runtime:=cc$(gcc_version)_kernel$(os_kernel_version)
# gcc 6.0 and later have -flifetime-dse option that controls
# elimination of stores done outside the object lifetime
-ifneq (,$(shell gcc -dumpversion | egrep "^([6-9])"))
+ifneq (,$(shell gcc -dumpfullversion -dumpversion | egrep "^([6-9])"))
# keep pre-construction stores for zero initialization
DSE_KEY = -flifetime-dse=1
endif
endif
ifndef runtime
- gcc_version:=$(shell gcc -dumpversion)
+ gcc_version:=$(shell gcc -dumpfullversion -dumpversion)
os_version:=$(shell uname -r)
os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//')
export runtime:=cc$(gcc_version)_kernel$(os_kernel_version)
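
# Note on the -dumpfullversion change above: starting with GCC 7, compilers
# configured with --with-gcc-major-version-only report just the major number
# for plain -dumpversion, which breaks the version regexes used in these
# makefiles. The combined form prints the full version on GCC 7 and later,
# while older releases still honor -dumpversion. An illustrative transcript
# (exact output depends on the toolchain):
#   $ gcc -dumpversion
#   7
#   $ gcc -dumpfullversion -dumpversion
#   7.4.0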
default_prefix = jp(default_prefix, 'Library') # conda-specific by default on Windows
parser = argparse.ArgumentParser()
-parser.add_argument('--tbbroot', default='.', help='Take Intel TBB from here')
-parser.add_argument('--prefix', default=default_prefix, help='Prefix')
-parser.add_argument('--prebuilt', default=[], action='append', help='Directories to find prebuilt files')
-parser.add_argument('--no-rebuild', default=False, action='store_true', help='do not rebuild')
-parser.add_argument('--install', default=False, action='store_true', help='install all')
-parser.add_argument('--install-libs', default=False, action='store_true', help='install libs')
-parser.add_argument('--install-devel', default=False, action='store_true', help='install devel')
-parser.add_argument('--install-docs', default=False, action='store_true', help='install docs')
-parser.add_argument('--install-python',default=False, action='store_true', help='install python module')
-parser.add_argument('--make-tool', default='make', help='Use different make command instead')
-parser.add_argument('--copy-tool', default=None, help='Use this command for copying ($ tool file dest-dir)')
-parser.add_argument('--build-args', default="", help='specify extra build args')
-parser.add_argument('--build-prefix', default='local', help='build dir prefix')
+parser.add_argument('--tbbroot', default='.', help='Take Intel TBB from here')
+parser.add_argument('--prefix', default=default_prefix, help='Prefix')
+parser.add_argument('--prebuilt', default=[], action='append', help='Directories to find prebuilt files')
+parser.add_argument('--no-rebuild', default=False, action='store_true', help='do not rebuild')
+parser.add_argument('--install', default=False, action='store_true', help='install all')
+parser.add_argument('--install-libs', default=False, action='store_true', help='install libs')
+parser.add_argument('--install-devel', default=False, action='store_true', help='install devel')
+parser.add_argument('--install-docs', default=False, action='store_true', help='install docs')
+parser.add_argument('--install-python', default=False, action='store_true', help='install python module')
+parser.add_argument('--make-tool', default='make', help='Use different make command instead')
+parser.add_argument('--copy-tool', default=None, help='Use this command for copying ($ tool file dest-dir)')
+parser.add_argument('--build-args', default="", help='specify extra build args')
+parser.add_argument('--build-prefix', default='local', help='build dir prefix')
+parser.add_argument('--cmake-dir', help='directory to install CMake configuration files. Default: <prefix>/lib/cmake/tbb')
if is_win:
parser.add_argument('--msbuild', default=False, action='store_true', help='Use msbuild')
parser.add_argument('--vs', default="2012", help='select VS version for build')
else:
install_cp = shutil.copy
-bin_dir = jp(args.prefix, "bin")
-lib_dir = jp(args.prefix, "lib")
-inc_dir = jp(args.prefix, 'include')
-doc_dir = jp(args.prefix, 'share', 'doc', 'tbb')
+bin_dir = jp(args.prefix, "bin")
+lib_dir = jp(args.prefix, "lib")
+inc_dir = jp(args.prefix, 'include')
+doc_dir = jp(args.prefix, 'share', 'doc', 'tbb')
+cmake_dir = jp(args.prefix, "lib", "cmake", "tbb") if args.cmake_dir is None else args.cmake_dir
+
if is_win:
os.environ["OS"] = "Windows_NT" # make sure TBB will interpret it corretly
libext = '.dll'
files = [f for f in filenames if not '.html' in f]
append_files(files, jp(inc_dir, rootdir.split('include')[1][1:]), paths=(rootdir,))
+ # Preparing CMake configuration files
+ cmake_build_dir = jp(args.tbbroot, 'build', args.build_prefix+'_release', 'cmake_configs')
+ assert system('cmake -DINSTALL_DIR=%s -DSYSTEM_NAME=%s -DTBB_VERSION_FILE=%s -DINC_REL_PATH=%s -DLIB_REL_PATH=%s -DBIN_REL_PATH=%s -P %s' % \
+ (cmake_build_dir,
+ platform.system(),
+ jp(args.tbbroot, 'include', 'tbb', 'tbb_stddef.h'),
+ os.path.relpath(inc_dir, cmake_dir),
+ os.path.relpath(lib_dir, cmake_dir),
+ os.path.relpath(bin_dir, cmake_dir),
+ jp(args.tbbroot, 'cmake', 'tbb_config_installer.cmake'))) == 0
+ append_files(['TBBConfig.cmake', 'TBBConfigVersion.cmake'], cmake_dir, paths=[cmake_build_dir])
+
if args.install_python: # RML part
irml_dir = jp(args.tbbroot, 'build', args.build_prefix+'_release')
run_make('-C src tbb_build_prefix=%s %s python_rml'% (args.build_prefix, args.build_args))
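
# An illustrative invocation of this script that exercises the new
# --cmake-dir option (all paths here are hypothetical):
#
#   python build.py --prefix=/opt/tbb --install-libs --install-devel \
#                   --cmake-dir=/opt/tbb/lib/cmake/tbb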
WScript.Echo("unknown");
}
} else {
- tmpExec = WshShell.Exec(compilerPath + " -dumpversion");
+ tmpExec = WshShell.Exec(compilerPath + " -dumpfullversion -dumpversion");
var gccVersion = tmpExec.StdOut.ReadLine();
if (WScript.Arguments(0) == "/runtime") {
WScript.Echo("mingw" + gccVersion);
C_FLAGS = $(CPLUS_FLAGS)
# gcc 4.2 and higher support OpenMP
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^(4\.[2-9]|[5-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^(4\.[2-9]|[5-9])"))
OPENMP_FLAG = -fopenmp
endif
# gcc 4.8 and later support RTM intrinsics, but require command line switch to enable them
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^(4\.[8-9]|[5-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^(4\.[8-9]|[5-9])"))
RTM_KEY = -mrtm
endif
# gcc 4.0 and later have -Wextra that is used by some of our customers.
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^([4-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^([4-9])"))
WARNING_KEY += -Wextra
endif
# gcc 5.0 and later have -Wsuggest-override and -Wno-sized-deallocation options
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^([5-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^([5-9])"))
# enable -Wsuggest-override via a pre-included header in order to limit to C++11 and above
INCLUDE_TEST_HEADERS = -include $(tbb_root)/src/test/harness_preload.h
WARNING_SUPPRESS += -Wno-sized-deallocation
# gcc 6.0 and later have -flifetime-dse option that controls
# elimination of stores done outside the object lifetime
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^([6-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^([6-9])"))
# keep pre-construction stores for zero initialization
DSE_KEY = -flifetime-dse=1
endif
endif
ifndef runtime
- export gcc_version:=$(shell gcc -dumpversion)
+ export gcc_version:=$(shell gcc -dumpfullversion -dumpversion)
os_version:=$(shell uname -r)
os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//')
export os_glibc_version_full:=$(shell getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //')
C_FLAGS = $(CPLUS_FLAGS)
# gcc 4.8 and later support RTM intrinsics, but require command line switch to enable them
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^(4\.[8-9]|[5-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^(4\.[8-9]|[5-9])"))
RTM_KEY = -mrtm
endif
# gcc 5.0 and later have -Wsuggest-override option
# enable it via a pre-included header in order to limit to C++11 and above
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^([5-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^([5-9])"))
INCLUDE_TEST_HEADERS = -include $(tbb_root)/src/test/harness_preload.h
endif
# gcc 6.0 and later have -flifetime-dse option that controls
# elimination of stores done outside the object lifetime
-ifneq (,$(shell $(CONLY) -dumpversion | egrep "^([6-9])"))
+ifneq (,$(shell $(CONLY) -dumpfullversion -dumpversion | egrep "^([6-9])"))
# keep pre-construction stores for zero initialization
DSE_KEY = -flifetime-dse=1
endif
.. code:: cmake
- tbb_install_config(INSTALL_DIR <install_dir> SYSTEM_NAME Linux|Darwin
+ tbb_install_config(INSTALL_DIR <install_dir> SYSTEM_NAME Linux|Darwin|Windows
 [TBB_VERSION <major>.<minor>.<interface>|TBB_VERSION_FILE <version_file>]
 [LIB_REL_PATH <lib_rel_path> BIN_REL_PATH <bin_rel_path> INC_REL_PATH <inc_rel_path>]
 [LIB_PATH <lib_path> BIN_PATH <bin_path> INC_PATH <inc_path>])
write it to TBBConfigVersion.cmake
``TBB_VERSION <major>.<minor>.<interface>`` Directly specified TBB version;
alternative to ``TBB_VERSION_FILE`` parameter
-``LIB_REL_PATH <lib_rel_path>`` Relative path to TBB binaries, default: ``../..``
+``LIB_REL_PATH <lib_rel_path>`` Relative path to TBB binaries (.lib files on Windows), default: ``../../../lib``
+``BIN_REL_PATH <bin_rel_path>`` Relative path to TBB DLLs, default: ``../../../bin`` (applicable for Windows only)
``INC_REL_PATH <inc_rel_path>`` Relative path to TBB headers, default: ``../../../include``
=========================================== ===========================================================
============================ ==============================================
``INSTALL_DIR <directory>`` Directory to install CMake configuration files
``SYSTEM_NAME Linux|Darwin|Windows`` OS name to generate config files for
-``LIB_PATH <lib_path>`` Path to installed TBB binaries
+``LIB_PATH <lib_path>`` Path to installed TBB binaries (.lib files on Windows)
+``BIN_PATH <bin_path>`` Path to installed TBB DLLs (applicable for Windows only)
``INC_PATH <inc_path>`` Path to installed TBB headers
============================ ==============================================
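
A hypothetical invocation for a Windows layout, based on the parameters
documented above (the version number and paths are placeholders):

.. code:: cmake

    include(TBBInstallConfig)
    tbb_install_config(INSTALL_DIR  ${CMAKE_CURRENT_BINARY_DIR}/cmake_configs
                       SYSTEM_NAME  Windows
                       TBB_VERSION  2019.7.11007
                       LIB_REL_PATH ../../../lib
                       BIN_REL_PATH ../../../bin
                       INC_REL_PATH ../../../include)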
function(tbb_install_config)
set(oneValueArgs INSTALL_DIR
SYSTEM_NAME
- LIB_REL_PATH INC_REL_PATH TBB_VERSION TBB_VERSION_FILE
- LIB_PATH INC_PATH) # If TBB is installed on the system
+ LIB_REL_PATH INC_REL_PATH BIN_REL_PATH TBB_VERSION TBB_VERSION_FILE
+ LIB_PATH BIN_PATH INC_PATH) # If TBB is installed on the system
cmake_parse_arguments(tbb_IC "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
file(MAKE_DIRECTORY ${config_install_dir})
# --- TBB_LIB_REL_PATH handling ---
- set(TBB_LIB_REL_PATH "../..")
+ set(TBB_LIB_REL_PATH "../../../lib")
if (tbb_IC_LIB_REL_PATH)
- set(TBB_LIB_REL_PATH ${tbb_IC_LIB_REL_PATH})
+ file(TO_CMAKE_PATH ${tbb_IC_LIB_REL_PATH} TBB_LIB_REL_PATH)
endif()
if (tbb_IC_LIB_PATH)
endif()
# ------
+ # --- TBB_BIN_REL_PATH handling ---
+ set(TBB_BIN_REL_PATH "../../../bin")
+
+ if (tbb_IC_BIN_REL_PATH)
+ file(TO_CMAKE_PATH ${tbb_IC_BIN_REL_PATH} TBB_BIN_REL_PATH)
+ endif()
+
+ if (tbb_IC_BIN_PATH)
+ get_filename_component(bin_abs_path ${tbb_IC_BIN_PATH} ABSOLUTE)
+ file(RELATIVE_PATH TBB_BIN_REL_PATH ${config_install_dir} ${bin_abs_path})
+ unset(bin_abs_path)
+ endif()
+ # ------
+
# --- TBB_INC_REL_PATH handling ---
set(TBB_INC_REL_PATH "../../../include")
if (tbb_IC_INC_REL_PATH)
- set(TBB_INC_REL_PATH ${tbb_IC_INC_REL_PATH})
+ file(TO_CMAKE_PATH ${tbb_IC_INC_REL_PATH} TBB_INC_REL_PATH)
endif()
if (tbb_IC_INC_PATH)
if (tbb_system_name STREQUAL "Linux")
set(TBB_LIB_PREFIX "lib")
set(TBB_LIB_EXT "so.2")
+ set(TBB_IMPLIB_RELEASE "")
+ set(TBB_IMPLIB_DEBUG "")
elseif (tbb_system_name STREQUAL "Darwin")
set(TBB_LIB_PREFIX "lib")
set(TBB_LIB_EXT "dylib")
+ set(TBB_IMPLIB_RELEASE "")
+ set(TBB_IMPLIB_DEBUG "")
+ elseif (tbb_system_name STREQUAL "Windows")
+ set(TBB_LIB_PREFIX "")
+ set(TBB_LIB_EXT "dll")
+ # .lib files are installed to TBB_LIB_REL_PATH (e.g. <prefix>/lib);
+ # .dll files are installed to TBB_BIN_REL_PATH (e.g. <prefix>/bin).
+ # Expand TBB_LIB_REL_PATH here into the IMPORTED_IMPLIB properties, then
+ # redefine it to the TBB_BIN_REL_PATH value so that the TBBConfig.cmake.in
+ # template fills the IMPORTED_LOCATION properties correctly.
+ set(TBB_IMPLIB_RELEASE "
+ IMPORTED_IMPLIB_RELEASE \"\${CMAKE_CURRENT_LIST_DIR}/${TBB_LIB_REL_PATH}/\${_tbb_component}.lib\"")
+ set(TBB_IMPLIB_DEBUG "
+ IMPORTED_IMPLIB_DEBUG \"\${CMAKE_CURRENT_LIST_DIR}/${TBB_LIB_REL_PATH}/\${_tbb_component}_debug.lib\"")
+ set(TBB_LIB_REL_PATH ${TBB_BIN_REL_PATH})
else()
message(FATAL_ERROR "Unsupported OS name: ${tbb_system_name}")
endif()
# limitations under the License.
function(tbb_conf_gen_print_help)
- message("Usage: cmake -DINSTALL_DIR=<config_install_dir> -DSYSTEM_NAME=Linux|Darwin <parameters> -P tbb_config_generator.cmake
+ message("Usage: cmake -DINSTALL_DIR=<config_install_dir> -DSYSTEM_NAME=Linux|Darwin|Windows <parameters> -P tbb_config_generator.cmake
Parameters:
For custom TBB package:
-DTBB_VERSION_FILE=<tbb_version_file>
-DTBB_VERSION=<major>.<minor>.<interface> (alternative to TBB_VERSION_FILE)
- -DLIB_REL_PATH=<relative_path_to_tbb_binaries>
-DINC_REL_PATH=<relative_path_to_tbb_headers>
+ -DLIB_REL_PATH=<relative_path_to_tbb_libs>
+ -DBIN_REL_PATH=<relative_path_to_tbb_dlls> (only for Windows)
For installed TBB:
- -DLIB_PATH=<path_to_installed_tbb_binaries>
-DINC_PATH=<path_to_installed_tbb_headers>
+ -DLIB_PATH=<path_to_installed_tbb_libs>
+ -DBIN_PATH=<path_to_installed_tbb_dlls> (only for Windows)
")
endfunction()
message(FATAL_ERROR "Required parameter SYSTEM_NAME is not defined")
endif()
-foreach (arg TBB_VERSION LIB_REL_PATH INC_REL_PATH TBB_VERSION_FILE LIB_PATH INC_PATH)
+foreach (arg TBB_VERSION INC_REL_PATH LIB_REL_PATH BIN_REL_PATH TBB_VERSION_FILE INC_PATH LIB_PATH BIN_PATH)
set(optional_args ${optional_args} ${arg} ${${arg}})
endforeach()
if (EXISTS "${_tbb_release_lib}")
set_target_properties(TBB::${_tbb_component} PROPERTIES
- IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}")
+ IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"@TBB_IMPLIB_RELEASE@)
set_property(TARGET TBB::${_tbb_component} APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
endif()
if (EXISTS "${_tbb_debug_lib}")
set_target_properties(TBB::${_tbb_component} PROPERTIES
- IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}")
+ IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"@TBB_IMPLIB_DEBUG@)
set_property(TARGET TBB::${_tbb_component} APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG)
endif()
CXX0XFLAGS ?= -std=c++0x
else
# support of lambda started GCC 4.5
- ifneq (, $(strip $(shell g++ -dumpversion | egrep "^(4\.[5-9]|[5-9])")))
+ ifneq (, $(strip $(shell g++ -dumpfullversion -dumpversion | egrep "^(4\.[5-9]|[5-9])")))
CXX0XFLAGS ?= -std=c++0x
endif
endif
.circ {
list-style-type:circle
}
-
+
.single {
padding: 0 0.5em;
}
-
+
/* ------------------------------------------------- */
/* Table styles */
table{
}
th{
border:1px #dddddd solid;
- padding-top:2px;
+ padding-top:2px;
padding-bottom:0px;
- padding-right:3px;
+ padding-right:3px;
padding-left:3px;
}
td{
padding-right:5px;
vertical-align:top;
}
-
+
.specs {
border-collapse:collapse;
}
padding: 0 0.2em 0.2em;
text-align: center;
}
- .specs td tr:last-child td,
+ .specs td tr:last-child td,
.specs td tr:last-child th {
padding: 0 0.2em;
}
<title>Intel® Threading Building Blocks (Intel® TBB). Samples on Intel® TBB Flow Graph feature</title>
</head>
<body>
-
+
<div id="banner">
<img class="logo" src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEMAAAAsCAYAAAA+aAX8AAAAAXNSR0IArs4c6QAAAARnQU1BAACx
jwv8YQUAAAAJcEhZcwAALiIAAC4iAari3ZIAAAAZdEVYdFNvZnR3YXJlAEFkb2JlIEltYWdlUmVh
9Or1LzUmVVz+HJXDAAAAAElFTkSuQmCC">
<h1 class="title">Intel® Threading Building Blocks (Intel® TBB).<br>Samples on Intel® TBB Flow Graph feature</h1>
</div>
-
+
<p>
This directory has examples of the Intel TBB Flow Graph feature.
</p>
<dd>Several versions of Cholesky Factorization algorithm implementation.
<dt><a href="stereo/readme.html">stereo</a>
<dd>An implementation of stereo image creation from two images (anaglyph effect).
- <dt><a href="matmult/readme.html">matmult</a>
- <dd>Matrix multiplication Gen kernel implementation with the flow graph interface.
</dl>
</div>
</div>
<div class="show-hide">
<p>
Intel and the Intel logo are trademarks of Intel Corporation in the U.S. and/or other countries.
- <br>* Other names and brands may be claimed as the property of others.
+ <br>* Other names and brands may be claimed as the property of others.
<br>© 2019, Intel Corporation
</p>
</div>
--- /dev/null
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 3.0.0 FATAL_ERROR)
+
+project(fibonacci CXX)
+add_executable(fibonacci Fibonacci.cpp)
+
+# find_package will search for available TBBConfig using variables CMAKE_PREFIX_PATH and TBB_DIR.
+find_package(TBB REQUIRED tbb)
+
+target_link_libraries(fibonacci
+ ${TBB_IMPORTED_TARGETS} # Link TBB imported targets to the executable; "TBB::tbb" can be used instead of "${TBB_IMPORTED_TARGETS}".
+ $<$<PLATFORM_ID:Linux>:rt>) # Link "rt" library on Linux
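+
+# A typical out-of-source build of this example might look as follows;
+# TBB_DIR is a placeholder for wherever TBBConfig.cmake is installed:
+#   mkdir build && cd build
+#   cmake -DTBB_DIR=<tbb_install_prefix>/lib/cmake/tbb ..
+#   cmake --build .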
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef __TBB_concurrent_map_H
+#define __TBB_concurrent_map_H
+
+#if !TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS
+#error Set TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS to include concurrent_map.h
+#endif
+
+#include "tbb_config.h"
+
+// concurrent_map requires C++11 support
+#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+
+#include "internal/_concurrent_skip_list_impl.h"
+
+namespace tbb {
+
+namespace interface10 {
+
+template<typename Key, typename Value, typename KeyCompare, typename RandomGenerator,
+ size_t MAX_LEVELS, typename Allocator, bool AllowMultimapping>
+class map_traits {
+public:
+ static constexpr size_t MAX_LEVEL = MAX_LEVELS;
+ using random_level_generator_type = RandomGenerator;
+ using key_type = Key;
+ using mapped_type = Value;
+ using compare_type = KeyCompare;
+ using value_type = std::pair<const key_type, mapped_type>;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using allocator_type = Allocator;
+ using mutex_type = tbb::spin_mutex;
+ using node_type = tbb::internal::node_handle<key_type, value_type, internal::skip_list_node<value_type, mutex_type>, allocator_type>;
+
+ static const bool allow_multimapping = AllowMultimapping;
+
+ class value_compare {
+ public:
+ // TODO: these member types are deprecated in C++17; consider whether they still need to be provided
+ using result_type = bool;
+ using first_argument_type = value_type;
+ using second_argument_type = value_type;
+
+ bool operator()(const value_type& lhs, const value_type& rhs) const {
+ return comp(lhs.first, rhs.first);
+ }
+
+ protected:
+ value_compare(compare_type c) : comp(c) {}
+
+ friend class map_traits;
+
+ compare_type comp;
+ };
+
+ static value_compare value_comp(compare_type comp) { return value_compare(comp); }
+
+ static const key_type& get_key(const_reference val) {
+ return val.first;
+ }
+}; // class map_traits
+
+template <typename Key, typename Value, typename Comp, typename Allocator>
+class concurrent_multimap;
+
+template <typename Key, typename Value, typename Comp = std::less<Key>, typename Allocator = tbb_allocator<std::pair<const Key, Value>>>
+class concurrent_map
+ : public internal::concurrent_skip_list<map_traits<Key, Value, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, false>> {
+ using traits_type = map_traits<Key, Value, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, false>;
+ using base_type = internal::concurrent_skip_list<traits_type>;
+#if __TBB_EXTRA_DEBUG
+public:
+#endif
+ using base_type::allow_multimapping;
+public:
+ using key_type = Key;
+ using mapped_type = Value;
+ using value_type = typename traits_type::value_type;
+ using size_type = typename base_type::size_type;
+ using difference_type = typename base_type::difference_type;
+ using key_compare = Comp;
+ using value_compare = typename base_type::value_compare;
+ using allocator_type = Allocator;
+
+ using reference = typename base_type::reference;
+ using const_reference = typename base_type::const_reference;
+ using pointer = typename base_type::pointer;
+ using const_pointer = typename base_type::const_pointer;
+
+ using iterator = typename base_type::iterator;
+ using const_iterator = typename base_type::const_iterator;
+ using reverse_iterator = typename base_type::reverse_iterator;
+ using const_reverse_iterator = typename base_type::const_reverse_iterator;
+
+ using node_type = typename base_type::node_type;
+
+ using base_type::end;
+ using base_type::find;
+ using base_type::emplace;
+ using base_type::insert;
+
+ concurrent_map() = default;
+
+ explicit concurrent_map(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {}
+
+ explicit concurrent_map(const allocator_type& alloc) : base_type(key_compare(), alloc) {}
+
+ template< class InputIt >
+ concurrent_map(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(first, last, comp, alloc) {}
+
+ template< class InputIt >
+ concurrent_map(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {}
+
+ /** Copy constructor */
+ concurrent_map(const concurrent_map&) = default;
+
+ concurrent_map(const concurrent_map& other, const allocator_type& alloc) : base_type(other, alloc) {}
+
+ concurrent_map(concurrent_map&&) = default;
+
+ concurrent_map(concurrent_map&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {}
+
+ concurrent_map(std::initializer_list<value_type> init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(comp, alloc) {
+ insert(init);
+ }
+
+ concurrent_map(std::initializer_list<value_type> init, const allocator_type& alloc)
+ : base_type(key_compare(), alloc) {
+ insert(init);
+ }
+
+ concurrent_map& operator=(const concurrent_map& other) {
+ return static_cast<concurrent_map&>(base_type::operator=(other));
+ }
+
+ concurrent_map& operator=(concurrent_map&& other) {
+ return static_cast<concurrent_map&>(base_type::operator=(std::move(other)));
+ }
+
+ mapped_type& at(const key_type& key) {
+ iterator it = find(key);
+
+ if (it == end()) {
+ tbb::internal::throw_exception(tbb::internal::eid_invalid_key);
+ }
+
+ return it->second;
+ }
+
+ const mapped_type& at(const key_type& key) const {
+ const_iterator it = find(key);
+
+ if (it == end()) {
+ tbb::internal::throw_exception(tbb::internal::eid_invalid_key);
+ }
+
+ return it->second;
+ }
+
+ mapped_type& operator[](const key_type& key) {
+ iterator it = find(key);
+
+ if (it == end()) {
+ it = emplace(std::piecewise_construct, std::forward_as_tuple(key), std::tuple<>()).first;
+ }
+
+ return it->second;
+ }
+
+ mapped_type& operator[](key_type&& key) {
+ iterator it = find(key);
+
+ if (it == end()) {
+ it = emplace(std::piecewise_construct, std::forward_as_tuple(std::move(key)), std::tuple<>()).first;
+ }
+
+ return it->second;
+ }
+
+ template<typename P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>
+ std::pair<iterator, bool> insert(P&& value) {
+ return emplace(std::forward<P>(value));
+ }
+
+ template<typename P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>
+ iterator insert(const_iterator hint, P&& value) {
+ return emplace_hint(hint, std::forward<P>(value));
+ }
+
+ template<typename C2>
+ void merge(concurrent_map<key_type, mapped_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_map<key_type, mapped_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+
+ template<typename C2>
+ void merge(concurrent_multimap<key_type, mapped_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_multimap<key_type, mapped_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+}; // class concurrent_map
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+namespace internal {
+
+using namespace tbb::internal;
+
+template<template<typename...> typename Map, typename Key, typename T, typename... Args>
+using c_map_t = Map<Key, T,
+ std::conditional_t< (sizeof...(Args) > 0) && !is_allocator_v<pack_element_t<0, Args...> >,
+ pack_element_t<0, Args...>, std::less<Key> >,
+ std::conditional_t< (sizeof...(Args) > 0) && is_allocator_v<pack_element_t<sizeof...(Args)-1, Args...> >,
+ pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > > >;
+} // namespace internal
+
+template<typename It, typename... Args>
+concurrent_map(It, It, Args...)
+-> internal::c_map_t<concurrent_map, internal::iterator_key_t<It>, internal::iterator_mapped_t<It>, Args...>;
+
+template<typename Key, typename T, typename... Args>
+concurrent_map(std::initializer_list<std::pair<const Key, T>>, Args...)
+-> internal::c_map_t<concurrent_map, Key, T, Args...>;
+
+#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+template <typename Key, typename Value, typename Comp = std::less<Key>, typename Allocator = tbb_allocator<std::pair<const Key, Value>>>
+class concurrent_multimap
+ : public internal::concurrent_skip_list<map_traits<Key, Value, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, true>> {
+ using traits_type = map_traits<Key, Value, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, true>;
+ using base_type = internal::concurrent_skip_list<traits_type>;
+#if __TBB_EXTRA_DEBUG
+public:
+#endif
+ using base_type::allow_multimapping;
+public:
+ using key_type = Key;
+ using mapped_type = Value;
+ using value_type = typename traits_type::value_type;
+ using size_type = typename base_type::size_type;
+ using difference_type = typename base_type::difference_type;
+ using key_compare = Comp;
+ using value_compare = typename base_type::value_compare;
+ using allocator_type = Allocator;
+
+ using reference = typename base_type::reference;
+ using const_reference = typename base_type::const_reference;
+ using pointer = typename base_type::pointer;
+ using const_pointer = typename base_type::const_pointer;
+
+ using iterator = typename base_type::iterator;
+ using const_iterator = typename base_type::const_iterator;
+ using reverse_iterator = typename base_type::reverse_iterator;
+ using const_reverse_iterator = typename base_type::const_reverse_iterator;
+
+ using node_type = typename base_type::node_type;
+
+ using base_type::end;
+ using base_type::find;
+ using base_type::emplace;
+ using base_type::insert;
+
+ concurrent_multimap() = default;
+
+ explicit concurrent_multimap(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {}
+
+ explicit concurrent_multimap(const allocator_type& alloc) : base_type(key_compare(), alloc) {}
+
+ template< class InputIt >
+ concurrent_multimap(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(first, last, comp, alloc) {}
+
+ template< class InputIt >
+ concurrent_multimap(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {}
+
+ /** Copy constructor */
+ concurrent_multimap(const concurrent_multimap&) = default;
+
+ concurrent_multimap(const concurrent_multimap& other, const allocator_type& alloc) : base_type(other, alloc) {}
+
+ concurrent_multimap(concurrent_multimap&&) = default;
+
+ concurrent_multimap(concurrent_multimap&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {}
+
+ concurrent_multimap(std::initializer_list<value_type> init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(comp, alloc) {
+ insert(init);
+ }
+
+ concurrent_multimap(std::initializer_list<value_type> init, const allocator_type& alloc)
+ : base_type(key_compare(), alloc) {
+ insert(init);
+ }
+
+ concurrent_multimap& operator=(const concurrent_multimap& other) {
+ return static_cast<concurrent_multimap&>(base_type::operator=(other));
+ }
+
+ concurrent_multimap& operator=(concurrent_multimap&& other) {
+ return static_cast<concurrent_multimap&>(base_type::operator=(std::move(other)));
+ }
+
+ template<typename P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>
+ std::pair<iterator, bool> insert(P&& value) {
+ return emplace(std::forward<P>(value));
+ }
+
+ template<typename P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>
+ iterator insert(const_iterator hint, P&& value) {
+ return emplace_hint(hint, std::forward<P>(value));
+ }
+
+ template<typename C2>
+ void merge(concurrent_multimap<key_type, mapped_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_multimap<key_type, mapped_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+
+ template<typename C2>
+ void merge(concurrent_map<key_type, mapped_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_map<key_type, mapped_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+
+}; // class concurrent_multimap
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+template<typename It, typename... Args>
+concurrent_multimap(It, It, Args...)
+-> internal::c_map_t<concurrent_multimap, internal::iterator_key_t<It>, internal::iterator_mapped_t<It>, Args...>;
+
+template<typename Key, typename T, typename... Args>
+concurrent_multimap(std::initializer_list<std::pair<const Key, T>>, Args...)
+-> internal::c_map_t<concurrent_multimap, Key, T, Args...>;
+
+#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+} // namespace interface10
+
+using interface10::concurrent_map;
+using interface10::concurrent_multimap;
+
+} // namespace tbb
+
+#endif // __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+#endif // __TBB_concurrent_map_H
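
A minimal usage sketch of the preview container defined above; the preview
macro must be defined before including the header, and C++11 support is
required:

    #define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
    #include "tbb/concurrent_map.h"
    #include <iostream>
    #include <string>

    int main() {
        tbb::concurrent_map<int, std::string> m;
        m.emplace(1, "one");   // thread-safe insertion
        m[2] = "two";          // operator[] default-constructs a value if absent
        auto it = m.find(1);
        if (it != m.end())
            std::cout << it->second << std::endl; // prints "one"
        return 0;
    }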
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef __TBB_concurrent_set_H
+#define __TBB_concurrent_set_H
+
+#if !TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS
+#error Set TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS to include concurrent_set.h
+#endif
+
+#include "tbb/tbb_config.h"
+
+// concurrent_set requires C++11 support
+#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+
+#include "internal/_concurrent_skip_list_impl.h"
+
+namespace tbb {
+namespace interface10 {
+
+// TODO: test this class
+template<typename Key, typename KeyCompare, typename RandomGenerator, size_t MAX_LEVELS, typename Allocator, bool AllowMultimapping>
+class set_traits {
+public:
+ static constexpr size_t MAX_LEVEL = MAX_LEVELS;
+ using random_level_generator_type = RandomGenerator;
+ using key_type = Key;
+ using value_type = key_type;
+ using compare_type = KeyCompare;
+ using value_compare = compare_type;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using allocator_type = Allocator;
+ using mutex_type = tbb::spin_mutex;
+ using node_type = tbb::internal::node_handle<key_type, value_type, internal::skip_list_node<value_type, mutex_type>, allocator_type>;
+
+ static const bool allow_multimapping = AllowMultimapping;
+
+ static const key_type& get_key(const_reference val) {
+ return val;
+ }
+
+ static value_compare value_comp(compare_type comp) { return comp; }
+};
+
+template <typename Key, typename Comp, typename Allocator>
+class concurrent_multiset;
+
+template <typename Key, typename Comp = std::less<Key>, typename Allocator = tbb_allocator<Key>>
+class concurrent_set
+ : public internal::concurrent_skip_list<set_traits<Key, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, false>> {
+ using traits_type = set_traits<Key, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, false>;
+ using base_type = internal::concurrent_skip_list<traits_type>;
+#if __TBB_EXTRA_DEBUG
+public:
+#endif
+ using base_type::allow_multimapping;
+public:
+ using key_type = Key;
+ using value_type = typename traits_type::value_type;
+ using size_type = typename base_type::size_type;
+ using difference_type = typename base_type::difference_type;
+ using key_compare = Comp;
+ using value_compare = typename base_type::value_compare;
+ using allocator_type = Allocator;
+
+ using reference = typename base_type::reference;
+ using const_reference = typename base_type::const_reference;
+ using pointer = typename base_type::pointer;
+ using const_pointer = typename base_type::const_pointer;
+
+ using iterator = typename base_type::iterator;
+ using const_iterator = typename base_type::const_iterator;
+ using reverse_iterator = typename base_type::reverse_iterator;
+ using const_reverse_iterator = typename base_type::const_reverse_iterator;
+
+ using node_type = typename base_type::node_type;
+
+ using base_type::insert;
+
+ concurrent_set() = default;
+
+ explicit concurrent_set(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {}
+
+ explicit concurrent_set(const allocator_type& alloc) : base_type(key_compare(), alloc) {}
+
+ template< class InputIt >
+ concurrent_set(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(first, last, comp, alloc) {}
+
+ template< class InputIt >
+ concurrent_set(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {}
+
+ /** Copy constructor */
+ concurrent_set(const concurrent_set&) = default;
+
+ concurrent_set(const concurrent_set& other, const allocator_type& alloc) : base_type(other, alloc) {}
+
+ concurrent_set(concurrent_set&&) = default;
+
+ concurrent_set(concurrent_set&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {}
+
+ concurrent_set(std::initializer_list<value_type> init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(comp, alloc) {
+ insert(init);
+ }
+
+ concurrent_set(std::initializer_list<value_type> init, const allocator_type& alloc)
+ : base_type(key_compare(), alloc) {
+ insert(init);
+ }
+
+ concurrent_set& operator=(const concurrent_set& other) {
+ return static_cast<concurrent_set&>(base_type::operator=(other));
+ }
+
+ concurrent_set& operator=(concurrent_set&& other) {
+ return static_cast<concurrent_set&>(base_type::operator=(std::move(other)));
+ }
+
+ template<typename C2>
+ void merge(concurrent_set<key_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_set<key_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+
+ template<typename C2>
+ void merge(concurrent_multiset<key_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_multiset<key_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+}; // class concurrent_set
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+namespace internal {
+
+using namespace tbb::internal;
+
+template<template<typename...> typename Set, typename Key, typename... Args>
+using c_set_t = Set<Key,
+ std::conditional_t< (sizeof...(Args) > 0) && !is_allocator_v<pack_element_t<0, Args...> >,
+ pack_element_t<0, Args...>, std::less<Key> >,
+ std::conditional_t< (sizeof...(Args) > 0) && is_allocator_v<pack_element_t<sizeof...(Args)-1, Args...> >,
+ pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<Key> > >;
+} // namespace internal
+
+template<typename It, typename... Args>
+concurrent_set(It, It, Args...)
+-> internal::c_set_t<concurrent_set, internal::iterator_value_t<It>, Args...>;
+
+template<typename Key, typename... Args>
+concurrent_set(std::initializer_list<Key>, Args...)
+-> internal::c_set_t<concurrent_set, Key, Args...>;
+
+#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+template <typename Key, typename Comp = std::less<Key>, typename Allocator = tbb_allocator<Key>>
+class concurrent_multiset
+ : public internal::concurrent_skip_list<set_traits<Key, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, true>> {
+ using traits_type = set_traits<Key, Comp, internal::concurrent_geometric_level_generator<64>, 64, Allocator, true>;
+ using base_type = internal::concurrent_skip_list<traits_type>;
+#if __TBB_EXTRA_DEBUG
+public:
+#endif
+ using base_type::allow_multimapping;
+public:
+ using key_type = Key;
+ using value_type = typename traits_type::value_type;
+ using size_type = typename base_type::size_type;
+ using difference_type = typename base_type::difference_type;
+ using key_compare = Comp;
+ using value_compare = typename base_type::value_compare;
+ using allocator_type = Allocator;
+
+ using reference = typename base_type::reference;
+ using const_reference = typename base_type::const_reference;
+ using pointer = typename base_type::pointer;
+ using const_pointer = typename base_type::const_pointer;
+
+ using iterator = typename base_type::iterator;
+ using const_iterator = typename base_type::const_iterator;
+ using reverse_iterator = typename base_type::reverse_iterator;
+ using const_reverse_iterator = typename base_type::const_reverse_iterator;
+
+ using node_type = typename base_type::node_type;
+
+ using base_type::insert;
+
+ concurrent_multiset() = default;
+
+ explicit concurrent_multiset(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {}
+
+ explicit concurrent_multiset(const allocator_type& alloc) : base_type(key_compare(), alloc) {}
+
+ template< class InputIt >
+ concurrent_multiset(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(comp, alloc) {
+ insert(first, last);
+ }
+
+ template< class InputIt >
+ concurrent_multiset(InputIt first, InputIt last, const allocator_type& alloc) : base_type(key_compare(), alloc) {
+ insert(first, last);
+ }
+
+ /** Copy constructor */
+ concurrent_multiset(const concurrent_multiset&) = default;
+
+ concurrent_multiset(const concurrent_multiset& other, const allocator_type& alloc) : base_type(other, alloc) {}
+
+ concurrent_multiset(concurrent_multiset&&) = default;
+
+ concurrent_multiset(concurrent_multiset&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {}
+
+ concurrent_multiset(std::initializer_list<value_type> init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type())
+ : base_type(comp, alloc) {
+ insert(init);
+ }
+
+ concurrent_multiset(std::initializer_list<value_type> init, const allocator_type& alloc)
+ : base_type(key_compare(), alloc) {
+ insert(init);
+ }
+
+ concurrent_multiset& operator=(const concurrent_multiset& other) {
+ return static_cast<concurrent_multiset&>(base_type::operator=(other));
+ }
+
+ concurrent_multiset& operator=(concurrent_multiset&& other) {
+ return static_cast<concurrent_multiset&>(base_type::operator=(std::move(other)));
+ }
+
+ template<typename C2>
+ void merge(concurrent_set<key_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_set<key_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+
+ template<typename C2>
+ void merge(concurrent_multiset<key_type, C2, Allocator>& source) {
+ this->internal_merge(source);
+ }
+
+ template<typename C2>
+ void merge(concurrent_multiset<key_type, C2, Allocator>&& source) {
+ this->internal_merge(std::move(source));
+ }
+}; // class concurrent_multiset
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+
+template<typename It, typename... Args>
+concurrent_multiset(It, It, Args...)
+-> internal::c_set_t<concurrent_multiset, internal::iterator_value_t<It>, Args...>;
+
+template<typename Key, typename... Args>
+concurrent_multiset(std::initializer_list<Key>, Args...)
+-> internal::c_set_t<concurrent_multiset, Key, Args...>;
+
+#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+
+} // namespace interface10
+
+using interface10::concurrent_set;
+using interface10::concurrent_multiset;
+
+} // namespace tbb
+
+#endif // __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+#endif // __TBB_concurrent_set_H
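
A companion sketch for the set variants defined above, under the same
preview macro and C++11 requirements:

    #define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
    #include "tbb/concurrent_set.h"

    int main() {
        tbb::concurrent_set<int> s;
        s.insert(42);                                 // thread-safe insertion
        tbb::concurrent_multiset<int> ms = {1, 1, 2}; // duplicates allowed
        ms.merge(s);                                  // moves the node for 42 out of s
        return 0;
    }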
typedef Hash_compare hash_compare;
typedef typename tbb::internal::allocator_rebind<Allocator, value_type>::type allocator_type;
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
- typedef internal::node_handle<Key, value_type, Allocator> node_type;
+ typedef tbb::internal::node_handle<key_type, value_type,
+ typename internal::split_ordered_list<value_type, allocator_type>::node,
+ allocator_type> node_type;
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
enum { allow_multimapping = Allow_multimapping };
typedef Hash_compare hash_compare;
typedef typename tbb::internal::allocator_rebind<Allocator, value_type>::type allocator_type;
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
- typedef internal::node_handle<Key, Key, allocator_type> node_type;
+ typedef tbb::internal::node_handle<key_type, key_type,
+ typename internal::split_ordered_list<key_type, allocator_type>::node,
+ allocator_type> node_type;
#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
enum { allow_multimapping = Allow_multimapping };
template< typename T > class sender;
template< typename T > class receiver;
class continue_receiver;
-template< typename T > class limiter_node; // needed for resetting decrementer
+} // namespace interface10
+namespace interface11 {
+template< typename T, typename U > class limiter_node; // needed for resetting decrementer
+}
+namespace interface10 {
template< typename R, typename B > class run_and_put_task;
namespace internal {
class untyped_receiver {
template< typename, typename > friend class run_and_put_task;
- template< typename > friend class limiter_node;
template< typename, typename > friend class internal::broadcast_cache;
template< typename, typename > friend class internal::round_robin_cache;
protected:
//! put receiver back in initial state
- template<typename U> friend class limiter_node;
virtual void reset_receiver(reset_flags f = rf_reset_protocol) = 0;
template<typename TT, typename M> friend class internal::successor_cache;
__TBB_FLOW_GRAPH_PRIORITY_EXPR( node_priority_t my_priority; )
// the friend declaration in the base class did not eliminate the "protected class"
// error in gcc 4.1.2
- template<typename U> friend class limiter_node;
+ template<typename U, typename V> friend class tbb::flow::interface11::limiter_node;
void reset_receiver( reset_flags f ) __TBB_override {
my_current_count = 0;
}
}; // priority_queue_node
+} // namespace interface10
+
+namespace interface11 {
+
+using namespace interface10;
+namespace internal = interface10::internal;
+
//! Forwards messages only if the threshold has not been reached
/** This node forwards items until its threshold is reached.
It contains no buffering. If the downstream node rejects, the
message is dropped. */
-template< typename T >
+template< typename T, typename DecrementType=continue_msg >
class limiter_node : public graph_node, public receiver< T >, public sender< T > {
public:
typedef T input_type;
internal::reservable_predecessor_cache< T, spin_mutex > my_predecessors;
spin_mutex my_mutex;
internal::broadcast_cache< T > my_successors;
- int init_decrement_predecessors;
+ __TBB_DEPRECATED_LIMITER_EXPR( int init_decrement_predecessors; )
- friend class internal::forward_task_bypass< limiter_node<T> >;
+ friend class internal::forward_task_bypass< limiter_node<T,DecrementType> >;
// Let decrementer call decrement_counter()
- friend class internal::decrementer< limiter_node<T> >;
+ friend class internal::decrementer< limiter_node<T,DecrementType>, DecrementType >;
bool check_conditions() { // always called under lock
return ( my_count + my_tries < my_threshold && !my_predecessors.empty() && !my_successors.empty() );
if ( check_conditions() ) {
if ( internal::is_graph_active(this->my_graph) ) {
task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
- internal::forward_task_bypass< limiter_node<T> >( *this );
+ internal::forward_task_bypass< limiter_node<T, DecrementType> >( *this );
internal::spawn_in_graph_arena(graph_reference(), *rtask);
}
}
if ( check_conditions() ) {
if ( internal::is_graph_active(this->my_graph) ) {
task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
- internal::forward_task_bypass< limiter_node<T> >( *this );
+ internal::forward_task_bypass< limiter_node<T, DecrementType> >( *this );
__TBB_ASSERT(!rval, "Have two tasks to handle");
return rtask;
}
return;
}
- task * decrement_counter() {
+ task* decrement_counter( long long delta ) {
{
spin_mutex::scoped_lock lock(my_mutex);
- if(my_count) --my_count;
+ if( delta > 0 && size_t(delta) > my_count )
+ my_count = 0;
+ else if( delta < 0 && size_t(-delta) > my_threshold - my_count )
+ my_count = my_threshold;
+ else
+ my_count -= size_t(delta); // absolute value of delta is sufficiently small
}
return forward_task();
}
-public:
- //! The internal receiver< continue_msg > that decrements the count
- internal::decrementer< limiter_node<T> > decrement;
-
- //! Constructor
- limiter_node(graph &g, size_t threshold, int num_decrement_predecessors=0) :
- graph_node(g), my_threshold(threshold), my_count(0), my_tries(0),
- init_decrement_predecessors(num_decrement_predecessors),
- decrement(num_decrement_predecessors)
- {
+ void initialize() {
my_predecessors.set_owner(this);
my_successors.set_owner(this);
decrement.set_owner(this);
- tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph,
- static_cast<receiver<input_type> *>(this), static_cast<receiver<continue_msg> *>(&decrement),
- static_cast<sender<output_type> *>(this) );
+ tbb::internal::fgt_node(
+ tbb::internal::FLOW_LIMITER_NODE, &this->my_graph,
+ static_cast<receiver<input_type> *>(this), static_cast<receiver<DecrementType> *>(&decrement),
+ static_cast<sender<output_type> *>(this)
+ );
+ }
+public:
+ //! The internal receiver< DecrementType > that decrements the count
+ internal::decrementer< limiter_node<T, DecrementType>, DecrementType > decrement;
+
+#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
+ __TBB_STATIC_ASSERT( (tbb::internal::is_same_type<DecrementType, continue_msg>::value),
+ "Deprecated interface of the limiter node can be used only in conjunction "
+ "with continue_msg as the type of DecrementType template parameter." );
+#endif // Check for incompatible interface
+
+ //! Constructor
+ limiter_node(graph &g,
+ __TBB_DEPRECATED_LIMITER_ARG2(size_t threshold, int num_decrement_predecessors=0))
+ : graph_node(g), my_threshold(threshold), my_count(0),
+ __TBB_DEPRECATED_LIMITER_ARG4(
+ my_tries(0), decrement(),
+ init_decrement_predecessors(num_decrement_predecessors),
+ decrement(num_decrement_predecessors)) {
+ initialize();
}
//! Copy constructor
limiter_node( const limiter_node& src ) :
graph_node(src.my_graph), receiver<T>(), sender<T>(),
- my_threshold(src.my_threshold), my_count(0), my_tries(0),
- init_decrement_predecessors(src.init_decrement_predecessors),
- decrement(src.init_decrement_predecessors)
- {
- my_predecessors.set_owner(this);
- my_successors.set_owner(this);
- decrement.set_owner(this);
- tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph,
- static_cast<receiver<input_type> *>(this), static_cast<receiver<continue_msg> *>(&decrement),
- static_cast<sender<output_type> *>(this) );
+ my_threshold(src.my_threshold), my_count(0),
+ __TBB_DEPRECATED_LIMITER_ARG4(
+ my_tries(0), decrement(),
+ init_decrement_predecessors(src.init_decrement_predecessors),
+ decrement(src.init_decrement_predecessors)) {
+ initialize();
}
#if TBB_PREVIEW_FLOW_GRAPH_TRACE
if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) {
if ( internal::is_graph_active(this->my_graph) ) {
task* task = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
- internal::forward_task_bypass < limiter_node<T> >( *this );
+ internal::forward_task_bypass < limiter_node<T, DecrementType> >( *this );
internal::spawn_in_graph_arena(graph_reference(), *task);
}
}
my_predecessors.add( src );
if ( my_count + my_tries < my_threshold && !my_successors.empty() && internal::is_graph_active(this->my_graph) ) {
task* task = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
- internal::forward_task_bypass < limiter_node<T> >( *this );
+ internal::forward_task_bypass < limiter_node<T, DecrementType> >( *this );
internal::spawn_in_graph_arena(graph_reference(), *task);
}
return true;
--my_tries;
if (check_conditions() && internal::is_graph_active(this->my_graph)) {
rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
- internal::forward_task_bypass< limiter_node<T> >( *this );
+ internal::forward_task_bypass< limiter_node<T, DecrementType> >( *this );
}
}
else {
return rtask;
}
- graph& graph_reference() __TBB_override {
- return my_graph;
- }
+ graph& graph_reference() __TBB_override { return my_graph; }
void reset_receiver(reset_flags f) __TBB_override {
__TBB_ASSERT(false, NULL); // should never be called
decrement.reset_receiver(f);
}
}; // limiter_node
+} // namespace interface11
+
+namespace interface10 {
#include "internal/_flow_graph_join_impl.h"
//! Implements gateway_type::try_put for an external activity to submit a message to FG
bool try_put_impl(const Output &i) {
internal::multifunction_output<Output> &port_0 = internal::output_port<0>(*this);
+ internal::broadcast_cache<output_type>& port_successors = port_0.successors();
tbb::internal::fgt_async_try_put_begin(this, &port_0);
- try_put_functor tpf(port_0, i);
- internal::execute_in_graph_arena(this->my_graph, tpf);
+ task_list tasks;
+ bool is_at_least_one_put_successful = port_successors.gather_successful_try_puts(i, tasks);
+ __TBB_ASSERT( is_at_least_one_put_successful || tasks.empty(),
+ "Return status is inconsistent with the method operation." );
+
+ while( !tasks.empty() ) {
+ internal::enqueue_in_graph_arena(this->my_graph, tasks.pop_front());
+ }
tbb::internal::fgt_async_try_put_end(this, &port_0);
- return tpf.result;
+ return is_at_least_one_put_successful;
}
public:
using interface10::queue_node;
using interface10::sequencer_node;
using interface10::priority_queue_node;
- using interface10::limiter_node;
+ using interface11::limiter_node;
using namespace interface10::internal::graph_policy_namespace;
using interface10::join_node;
using interface10::input_port;
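
The async_node rework above gathers successful try_puts and enqueues the
resulting tasks instead of executing them in the caller, so an external
activity no longer blocks inside gateway_type::try_put. A rough sketch of
the pattern this targets (illustrative; error handling and thread lifetime
management are omitted):

    #include "tbb/flow_graph.h"
    #include <thread>

    int main() {
        using async_node_t = tbb::flow::async_node<int, int>;
        tbb::flow::graph g;
        async_node_t node(g, tbb::flow::unlimited,
            [](const int& input, async_node_t::gateway_type& gw) {
                gw.reserve_wait(); // keep the graph alive until the async work is done
                std::thread([input, &gw] {
                    gw.try_put(input * 2); // does not block the submitting thread
                    gw.release_wait();
                }).detach();
            });
        tbb::flow::function_node<int, int> sink(g, tbb::flow::serial,
                                                [](int v) { return v; });
        tbb::flow::make_edge(node, sink);
        node.try_put(21);
        g.wait_for_all();
        return 0;
    }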
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef __TBB_concurrent_skip_list_H
+#define __TBB_concurrent_skip_list_H
+
+#if !defined(__TBB_concurrent_map_H) && !defined(__TBB_concurrent_set_H)
+#error Do not #include this internal file directly; use public TBB headers instead.
+#endif
+
+#include "../tbb_config.h"
+#include "../tbb_stddef.h"
+#include "../tbb_allocator.h"
+#include "../spin_mutex.h"
+#include "../tbb_exception.h"
+#include "../enumerable_thread_specific.h"
+#include "_allocator_traits.h"
+#include "_template_helpers.h"
+#include "_node_handle_impl.h"
+#include <utility> // Need std::pair
+#include <functional>
+#include <initializer_list>
+#include <memory> // Need std::allocator_traits
+#include <atomic>
+#include <mutex>
+#include <vector>
+#include <array>
+#include <type_traits>
+#include <cstdlib>
+#include <random>
+#include <tuple>
+
+#if _MSC_VER
+#pragma warning(disable: 4189) // warning 4189 -- local variable is initialized but not referenced
+#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it
+#endif
+
+namespace tbb {
+namespace interface10 {
+namespace internal {
+
+template <typename Value, typename Mutex>
+class skip_list_node {
+
+public:
+ using value_type = Value;
+ using size_type = std::size_t;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = value_type*;
+ using const_pointer = const value_type*;
+ using node_pointer = skip_list_node*;
+ using atomic_node_pointer = std::atomic<node_pointer>;
+
+ using mutex_type = Mutex;
+ using lock_type = std::unique_lock<mutex_type>;
+
+ skip_list_node(size_type levels) : my_height(levels), my_fullyLinked(false) {
+ for (size_type lev = 0; lev < my_height; ++lev)
+ new(&my_next(lev)) atomic_node_pointer(nullptr);
+ __TBB_ASSERT(height() == levels, "Wrong node height");
+ }
+
+ ~skip_list_node() {
+ for(size_type lev = 0; lev < my_height; ++lev)
+ my_next(lev).~atomic();
+ }
+
+ skip_list_node(const skip_list_node&) = delete;
+
+ skip_list_node(skip_list_node&&) = delete;
+
+ skip_list_node& operator=(const skip_list_node&) = delete;
+
+ pointer storage() {
+ return reinterpret_cast<pointer>(&my_val);
+ }
+
+ reference value() {
+ return *storage();
+ }
+
+ node_pointer next(size_type level) const {
+ __TBB_ASSERT(level < height(), "Cannot get next on the level greater than height");
+ return my_next(level).load(std::memory_order_acquire);
+ }
+
+ void set_next(size_type level, node_pointer next) {
+ __TBB_ASSERT(level < height(), "Cannot set next on the level greater than height");
+
+ my_next(level).store(next, std::memory_order_release);
+ }
+
+ /** @return number of layers */
+ size_type height() const {
+ return my_height;
+ }
+
+ bool fully_linked() const {
+ return my_fullyLinked.load(std::memory_order_acquire);
+ }
+
+ void mark_linked() {
+ my_fullyLinked.store(true, std::memory_order_release);
+ }
+
+ lock_type acquire() {
+ return lock_type(my_mutex);
+ }
+
+private:
+ using aligned_storage_type = typename std::aligned_storage<sizeof(value_type), alignof(value_type)>::type;
+
+ atomic_node_pointer& my_next(size_type level) {
+ atomic_node_pointer* arr = reinterpret_cast<atomic_node_pointer*>(this + 1);
+ return arr[level];
+ }
+
+ const atomic_node_pointer& my_next(size_type level) const {
+ const atomic_node_pointer* arr = reinterpret_cast<const atomic_node_pointer*>(this + 1);
+ return arr[level];
+ }
+
+ mutex_type my_mutex;
+ aligned_storage_type my_val;
+ size_type my_height;
+ std::atomic_bool my_fullyLinked;
+};
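+// Layout note (descriptive only, derived from the code above): each
+// skip_list_node is over-allocated so that an array of my_height atomic link
+// pointers lives immediately after the node object itself; my_next() recovers
+// that array via the (this + 1) cast. A node of height h therefore occupies
+// sizeof(skip_list_node) + h * sizeof(atomic_node_pointer) bytes, which is
+// exactly what calc_node_size() in concurrent_skip_list computes.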
+
+template <typename NodeType, bool is_const>
+class skip_list_iterator {
+ using node_type = NodeType;
+ using node_ptr = node_type*;
+public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename node_type::value_type;
+ using difference_type = std::ptrdiff_t;
+ using pointer = typename std::conditional<is_const, typename node_type::const_pointer,
+ typename node_type::pointer>::type;
+ using reference = typename std::conditional<is_const, typename node_type::const_reference,
+ typename node_type::reference>::type;
+
+ skip_list_iterator() : my_node_ptr(nullptr) {}
+
+    // TODO: the code below does not compile in VS2015 (seems like a bug); consider enabling it for all other platforms
+ // template <typename T = void, typename = typename std::enable_if<is_const, T>::type>
+ // skip_list_iterator(const skip_list_iterator<node_type, false>& other) : my_node_ptr(other.my_node_ptr) {}
+
+ // skip_list_iterator(const skip_list_iterator& other) : my_node_ptr(other.my_node_ptr) {}
+
+ skip_list_iterator(const skip_list_iterator<node_type, false>& other) : my_node_ptr(other.my_node_ptr) {}
+
+ template <typename T = void, typename = typename std::enable_if<is_const, T>::type>
+ skip_list_iterator(const skip_list_iterator<node_type, true>& other) : my_node_ptr(other.my_node_ptr) {}
+
+ reference operator*() const { return *(my_node_ptr->storage()); }
+ pointer operator->() const { return &**this; }
+
+ skip_list_iterator& operator++() {
+ __TBB_ASSERT(my_node_ptr != nullptr, NULL);
+ my_node_ptr = my_node_ptr->next(0);
+ return *this;
+ }
+
+ skip_list_iterator operator++(int) {
+ skip_list_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+private:
+ skip_list_iterator(node_type* n) : my_node_ptr(n) {}
+
+ node_ptr my_node_ptr;
+
+ template <typename Traits>
+ friend class concurrent_skip_list;
+
+ friend class skip_list_iterator<NodeType, true>;
+
+ friend class const_range;
+ friend class range;
+
+ template <typename T, bool M, bool U>
+ friend bool operator==(const skip_list_iterator<T, M>&, const skip_list_iterator<T, U>&);
+
+ template <typename T, bool M, bool U>
+ friend bool operator!=(const skip_list_iterator<T, M>&, const skip_list_iterator<T, U>&);
+};
+
+template <typename NodeType, bool is_const1, bool is_const2>
+bool operator==(const skip_list_iterator<NodeType, is_const1>& lhs, const skip_list_iterator<NodeType, is_const2>& rhs) {
+ return lhs.my_node_ptr == rhs.my_node_ptr;
+}
+
+template <typename NodeType, bool is_const1, bool is_const2>
+bool operator!=(const skip_list_iterator<NodeType, is_const1>& lhs, const skip_list_iterator<NodeType, is_const2>& rhs) {
+ return lhs.my_node_ptr != rhs.my_node_ptr;
+}
+
+template <typename Traits>
+class concurrent_skip_list {
+protected:
+ using traits_type = Traits;
+ using allocator_type = typename traits_type::allocator_type;
+ using allocator_traits_type = std::allocator_traits<allocator_type>;
+ using key_compare = typename traits_type::compare_type;
+ using value_compare = typename traits_type::value_compare;
+ using key_type = typename traits_type::key_type;
+ using value_type = typename traits_type::value_type;
+ using node_type = typename traits_type::node_type;
+ using list_node_type = skip_list_node<value_type, typename traits_type::mutex_type>;
+
+ using iterator = skip_list_iterator<list_node_type, /*is_const=*/false>;
+ using const_iterator = skip_list_iterator<list_node_type, /*is_const=*/true>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = typename allocator_traits_type::pointer;
+ using const_pointer = typename allocator_traits_type::const_pointer;
+ using size_type = std::size_t;
+ using difference_type = std::ptrdiff_t;
+
+ using random_level_generator_type = typename traits_type::random_level_generator_type;
+ using node_allocator_type = typename std::allocator_traits<allocator_type>::template rebind_alloc<uint8_t>;
+ using node_allocator_traits = typename std::allocator_traits<allocator_type>::template rebind_traits<uint8_t>;
+ using node_ptr = list_node_type*;
+
+ static constexpr size_type MAX_LEVEL = traits_type::MAX_LEVEL;
+
+ using array_type = std::array<node_ptr, MAX_LEVEL>;
+ using lock_array = std::array<typename list_node_type::lock_type, MAX_LEVEL>;
+
+public:
+ static bool const allow_multimapping = traits_type::allow_multimapping;
+
+ /**
+ * Default constructor. Construct empty skip list.
+ */
+ concurrent_skip_list() : my_size(0) {
+ create_dummy_head();
+ }
+
+ explicit concurrent_skip_list(const key_compare& comp, const allocator_type& alloc = allocator_type())
+ : my_node_allocator(alloc), my_compare(comp), my_size(0)
+ {
+ create_dummy_head();
+ }
+
+ template<class InputIt>
+ concurrent_skip_list(InputIt first, InputIt last, const key_compare& comp = key_compare(),
+ const allocator_type& alloc = allocator_type())
+ : my_node_allocator(alloc), my_compare(comp), my_size(0)
+ {
+ create_dummy_head();
+ internal_copy(first, last);
+ }
+
+ /** Copy constructor */
+ concurrent_skip_list(const concurrent_skip_list& other)
+ : my_node_allocator(node_allocator_traits::select_on_container_copy_construction(other.get_allocator())),
+ my_compare(other.my_compare), my_rnd_generator(other.my_rnd_generator), my_size(0)
+ {
+ create_dummy_head();
+ internal_copy(other);
+ __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container");
+ }
+
+ concurrent_skip_list(const concurrent_skip_list& other, const allocator_type& alloc)
+ : my_node_allocator(alloc), my_compare(other.my_compare),
+ my_rnd_generator(other.my_rnd_generator), my_size(0)
+ {
+ create_dummy_head();
+ internal_copy(other);
+ __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container");
+ }
+
+ concurrent_skip_list(concurrent_skip_list&& other)
+ : my_node_allocator(std::move(other.my_node_allocator)), my_compare(other.my_compare),
+ my_rnd_generator(other.my_rnd_generator)
+ {
+ internal_move(std::move(other));
+ }
+
+ concurrent_skip_list(concurrent_skip_list&& other, const allocator_type& alloc)
+ : my_node_allocator(alloc), my_compare(other.my_compare),
+ my_rnd_generator(other.my_rnd_generator)
+ {
+ if (alloc == other.get_allocator()) {
+ internal_move(std::move(other));
+ } else {
+ my_size = 0;
+ create_dummy_head();
+ internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()));
+ }
+ }
+
+ ~concurrent_skip_list() {
+ clear();
+ delete_dummy_head();
+ }
+
+ concurrent_skip_list& operator=(const concurrent_skip_list& other) {
+ if (this != &other) {
+ using pocca_type = typename node_allocator_traits::propagate_on_container_copy_assignment;
+ clear();
+ tbb::internal::allocator_copy_assignment(my_node_allocator, other.my_node_allocator, pocca_type());
+ my_compare = other.my_compare;
+ my_rnd_generator = other.my_rnd_generator;
+ internal_copy(other);
+ }
+ return *this;
+ }
+
+ concurrent_skip_list& operator=(concurrent_skip_list&& other) {
+ if (this != &other) {
+ using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment;
+ clear();
+ my_compare = other.my_compare;
+ my_rnd_generator = other.my_rnd_generator;
+ internal_move_assign(std::move(other), pocma_type());
+ }
+ return *this;
+ }
+
+ concurrent_skip_list& operator=(std::initializer_list<value_type> il)
+ {
+ clear();
+ insert(il.begin(),il.end());
+ return *this;
+ }
+
+ std::pair<iterator, bool> insert(const value_type& value) {
+ return internal_insert(value);
+ }
+
+ std::pair<iterator, bool> insert(value_type&& value) {
+ return internal_insert(std::move(value));
+ }
+
+ iterator insert(const_iterator, const_reference value) {
+ // Ignore hint
+ return insert(value).first;
+ }
+
+ iterator insert(const_iterator, value_type&& value) {
+ // Ignore hint
+ return insert(std::move(value)).first;
+ }
+
+ template<typename InputIterator>
+ void insert(InputIterator first, InputIterator last) {
+ for (InputIterator it = first; it != last; ++it)
+ insert(*it);
+ }
+
+ void insert(std::initializer_list<value_type> init) {
+ insert(init.begin(), init.end());
+ }
+
+ std::pair<iterator, bool> insert(node_type&& nh) {
+ if(!nh.empty()) {
+ std::pair<iterator, bool> insert_result = internal_insert_node(nh.my_node);
+ if(insert_result.second) {
+ nh.deactivate();
+ }
+ return insert_result;
+ }
+ return std::pair<iterator, bool>(end(), false);
+ }
+
+ iterator insert(const_iterator, node_type&& nh) {
+ // Ignore hint
+ return insert(std::move(nh)).first;
+ }
+
+ template<typename... Args >
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return internal_insert(std::forward<Args>(args)...);
+ }
+
+ template<typename... Args>
+ iterator emplace_hint(const_iterator, Args&&... args) {
+ // Ignore hint
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ iterator unsafe_erase(iterator pos) {
+ std::pair<node_ptr, node_ptr> extract_result = internal_extract(pos);
+ if(extract_result.first) { // node was extracted
+ delete_node(extract_result.first);
+ return extract_result.second;
+ }
+ return end();
+ }
+
+ iterator unsafe_erase(const_iterator first, const_iterator last) {
+ while(first != last) {
+ first = unsafe_erase(get_iterator(first));
+ }
+ return get_iterator(first);
+ }
+
+ size_type unsafe_erase(const key_type& key) {
+ std::pair<iterator, iterator> range = equal_range(key);
+ size_type sz = std::distance(range.first, range.second);
+ unsafe_erase(range.first, range.second);
+ return sz;
+ }
+
+ node_type unsafe_extract(const_iterator pos) {
+ std::pair<node_ptr, node_ptr> extract_result = internal_extract(pos);
+ return extract_result.first ? node_type(extract_result.first) : node_type();
+ }
+
+ node_type unsafe_extract(const key_type& key) {
+ return unsafe_extract(find(key));
+ }
+
+ iterator lower_bound(const key_type& key) {
+ return internal_get_bound(key, my_compare);
+ }
+
+ const_iterator lower_bound(const key_type& key) const {
+ return internal_get_bound(key, my_compare);
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ iterator lower_bound(const K& key) {
+ return internal_get_bound(key, my_compare);
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ const_iterator lower_bound(const K& key) const {
+ return internal_get_bound(key, my_compare);
+ }
+
+ iterator upper_bound(const key_type& key) {
+ return internal_get_bound(key, not_greater_compare(my_compare));
+ }
+
+ const_iterator upper_bound(const key_type& key) const {
+ return internal_get_bound(key, not_greater_compare(my_compare));
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ iterator upper_bound(const K& key) {
+ return internal_get_bound(key, not_greater_compare(my_compare));
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ const_iterator upper_bound(const K& key) const {
+ return internal_get_bound(key, not_greater_compare(my_compare));
+ }
+
+ iterator find(const key_type& key) {
+ return internal_find(key);
+ }
+
+ const_iterator find(const key_type& key) const {
+ return internal_find(key);
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ iterator find(const K& key) {
+ return internal_find(key);
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ const_iterator find(const K& key) const {
+ return internal_find(key);
+ }
+
+ size_type count( const key_type& key ) const {
+ return internal_count(key);
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ size_type count(const K& key) const {
+ return internal_count(key);
+ }
+
+ bool contains(const key_type& key) const {
+ return find(key) != end();
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ bool contains(const K& key) const {
+ return find(key) != end();
+ }
+
+ void clear() noexcept {
+ __TBB_ASSERT(dummy_head->height() > 0, NULL);
+
+ node_ptr current = dummy_head->next(0);
+ while (current) {
+ __TBB_ASSERT(current->height() > 0, NULL);
+ node_ptr next = current->next(0);
+ delete_node(current);
+ current = next;
+ }
+
+ my_size = 0;
+ for (size_type i = 0; i < dummy_head->height(); ++i) {
+ dummy_head->set_next(i, nullptr);
+ }
+ }
+
+ iterator begin() {
+ return iterator(dummy_head->next(0));
+ }
+
+ const_iterator begin() const {
+ return const_iterator(dummy_head->next(0));
+ }
+
+ const_iterator cbegin() const {
+ return const_iterator(dummy_head->next(0));
+ }
+
+ iterator end() {
+ return iterator(nullptr);
+ }
+
+ const_iterator end() const {
+ return const_iterator(nullptr);
+ }
+
+ const_iterator cend() const {
+ return const_iterator(nullptr);
+ }
+
+ size_type size() const {
+ return my_size.load(std::memory_order_relaxed);
+ }
+
+ size_type max_size() const {
+ return my_node_allocator.max_size();
+ }
+
+ bool empty() const {
+ return 0 == size();
+ }
+
+ allocator_type get_allocator() const {
+ return my_node_allocator;
+ }
+
+ void swap(concurrent_skip_list& other) {
+ using std::swap;
+ using pocs_type = typename node_allocator_traits::propagate_on_container_swap;
+ tbb::internal::allocator_swap(my_node_allocator, other.my_node_allocator, pocs_type());
+ swap(my_compare, other.my_compare);
+ swap(my_rnd_generator, other.my_rnd_generator);
+ swap(dummy_head, other.dummy_head);
+
+ size_type tmp = my_size;
+ my_size.store(other.my_size);
+ other.my_size.store(tmp);
+ }
+
+ std::pair<iterator, iterator> equal_range(const key_type& key) {
+ return std::pair<iterator, iterator>(lower_bound(key), upper_bound(key));
+ }
+
+ std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
+ return std::pair<const_iterator, const_iterator>(lower_bound(key), upper_bound(key));
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ std::pair<iterator, iterator> equal_range(const K& key) {
+ return std::pair<iterator, iterator>(lower_bound(key), upper_bound(key));
+ }
+
+ template<typename K, typename = typename std::enable_if<tbb::internal::has_is_transparent<key_compare>::value, K>::type>
+ std::pair<const_iterator, const_iterator> equal_range(const K& key) const {
+ return std::pair<const_iterator, const_iterator>(lower_bound(key), upper_bound(key));
+ }
+
+ key_compare key_comp() const { return my_compare; }
+
+ value_compare value_comp() const { return traits_type::value_comp(my_compare); }
+
+ class const_range_type : tbb::internal::no_assign {
+ public:
+ using size_type = typename concurrent_skip_list::size_type;
+ using value_type = typename concurrent_skip_list::value_type;
+ using iterator = typename concurrent_skip_list::const_iterator;
+ private:
+ const_iterator my_end;
+ const_iterator my_begin;
+ size_type my_level;
+
+ public:
+
+ bool empty() const {
+ return my_begin.my_node_ptr->next(0) == my_end.my_node_ptr;
+ }
+
+ bool is_divisible() const {
+ return my_level != 0 ? my_begin.my_node_ptr->next(my_level - 1) != my_end.my_node_ptr : false;
+ }
+
+ size_type size() const { return std::distance(my_begin, my_end);}
+
+ const_range_type( const_range_type& r, split)
+ : my_end(r.my_end) {
+ my_begin = iterator(r.my_begin.my_node_ptr->next(r.my_level - 1));
+ my_level = my_begin.my_node_ptr->height();
+ r.my_end = my_begin;
+ }
+
+ const_range_type( const concurrent_skip_list& l)
+ : my_end(l.end()), my_begin(l.begin()), my_level(my_begin.my_node_ptr->height() ) {}
+
+ iterator begin() const { return my_begin; }
+ iterator end() const { return my_end; }
+ size_t grainsize() const { return 1; }
+
+ }; // class const_range_type
+
+ class range_type : public const_range_type {
+ public:
+ using iterator = typename concurrent_skip_list::iterator;
+
+ range_type(range_type& r, split) : const_range_type(r, split()) {}
+ range_type(const concurrent_skip_list& l) : const_range_type(l) {}
+
+ iterator begin() const {
+ node_ptr node = const_range_type::begin().my_node_ptr;
+ return iterator(node);
+ }
+
+        iterator end() const {
+            node_ptr node = const_range_type::end().my_node_ptr;
+            return iterator(node);
+        }
+ }; // class range_type
+
+ range_type range() { return range_type(*this); }
+ const_range_type range() const { return const_range_type(*this); }
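+
+    // Usage sketch (illustrative only, assuming a container built on this
+    // class, e.g. one of the preview ordered containers; process() is a
+    // hypothetical user function):
+    //     tbb::parallel_for(container.range(),
+    //                       [](const range_type& r) {
+    //                           for (auto it = r.begin(); it != r.end(); ++it)
+    //                               process(*it);
+    //                       });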
+
+private:
+ void internal_move(concurrent_skip_list&& other) {
+ dummy_head = other.dummy_head;
+ other.dummy_head = nullptr;
+ other.create_dummy_head();
+
+ my_size = other.my_size.load();
+ other.my_size = 0;
+ }
+
+ static const key_type& get_key(node_ptr n) {
+ __TBB_ASSERT(n, NULL);
+ return traits_type::get_key(n->value());
+ }
+
+ template <typename K>
+ iterator internal_find(const K& key) {
+ iterator it = lower_bound(key);
+ return (it == end() || my_compare(key, traits_type::get_key(*it))) ? end() : it;
+ }
+
+ template <typename K>
+ const_iterator internal_find(const K& key) const {
+ const_iterator it = lower_bound(key);
+ return (it == end() || my_compare(key, traits_type::get_key(*it))) ? end() : it;
+ }
+
+ template <typename K>
+ size_type internal_count( const K& key ) const {
+ if (allow_multimapping) {
+ std::pair<const_iterator, const_iterator> range = equal_range(key);
+ return std::distance(range.first, range.second);
+ }
+ return (find(key) == end()) ? size_type(0) : size_type(1);
+ }
+
+    /**
+     * Finds the position of the previous node on the given level using @p cmp
+     * @param level - level on which to search for the previous node
+     * @param prev - pointer to the node from which the search starts
+     * @param key - key to search for
+     * @param cmp - callable object used to compare two keys
+     *              (the my_compare member is the default comparator)
+     * @returns pointer to the first node whose key does not satisfy the comparison with @p key
+     */
+ template <typename K, typename pointer_type, typename comparator>
+ pointer_type internal_find_position( size_type level, pointer_type& prev, const K& key,
+ const comparator& cmp) const {
+ __TBB_ASSERT(level < prev->height(), "Wrong level to find position");
+ pointer_type curr = prev->next(level);
+
+ while (curr && cmp(get_key(curr), key)) {
+ prev = curr;
+ __TBB_ASSERT(level < prev->height(), NULL);
+ curr = prev->next(level);
+ }
+
+ return curr;
+ }
+
+ template <typename comparator>
+ void fill_prev_next_arrays(array_type& prev_nodes, array_type& next_nodes, node_ptr prev, const key_type& key,
+ const comparator& cmp) {
+ prev_nodes.fill(dummy_head);
+ next_nodes.fill(nullptr);
+
+ for (size_type h = prev->height(); h > 0; --h) {
+ node_ptr next = internal_find_position(h - 1, prev, key, cmp);
+ prev_nodes[h - 1] = prev;
+ next_nodes[h - 1] = next;
+ }
+ }
+
+ template<typename... Args>
+ std::pair<iterator, bool> internal_insert(Args&&... args) {
+ node_ptr new_node = create_node(std::forward<Args>(args)...);
+ std::pair<iterator, bool> insert_result = internal_insert_node(new_node);
+ if(!insert_result.second) {
+ delete_node(new_node);
+ }
+ return insert_result;
+ }
+
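+    // Concurrent insertion works optimistically: the per-level predecessors
+    // and successors are found without locking (fill_prev_next_arrays), then
+    // the predecessors are locked and re-validated to confirm their next
+    // pointers are unchanged (try_lock_nodes); if validation fails, another
+    // thread raced this one and the whole search is retried in the do/while
+    // loop below.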
+ std::pair<iterator, bool> internal_insert_node(node_ptr new_node) {
+ array_type prev_nodes;
+ array_type next_nodes;
+ __TBB_ASSERT(dummy_head->height() >= new_node->height(), "Wrong height for new node");
+
+ do {
+ if (allow_multimapping) {
+ fill_prev_next_arrays(prev_nodes, next_nodes, dummy_head, get_key(new_node),
+ not_greater_compare(my_compare));
+ } else {
+ fill_prev_next_arrays(prev_nodes, next_nodes, dummy_head, get_key(new_node), my_compare);
+ }
+
+ node_ptr next = next_nodes[0];
+ if (next && !allow_multimapping && !my_compare(get_key(new_node), get_key(next))) {
+ // TODO: do we really need to wait?
+ while (!next->fully_linked()) {
+ // TODO: atomic backoff
+ }
+
+ return std::pair<iterator, bool>(iterator(next), false);
+ }
+ __TBB_ASSERT(allow_multimapping || !next || my_compare(get_key(new_node), get_key(next)),
+ "Wrong elements order");
+
+ } while (!try_insert_node(new_node, prev_nodes, next_nodes));
+
+ __TBB_ASSERT(new_node, NULL);
+ return std::pair<iterator, bool>(iterator(new_node), true);
+ }
+
+ bool try_insert_node(node_ptr new_node, array_type& prev_nodes, array_type& next_nodes) {
+ __TBB_ASSERT(dummy_head->height() >= new_node->height(), NULL);
+
+ lock_array locks;
+
+ if (!try_lock_nodes(new_node->height(), prev_nodes, next_nodes, locks)) {
+ return false;
+ }
+
+ __TBB_ASSERT(allow_multimapping ||
+ ((prev_nodes[0] == dummy_head ||
+ my_compare(get_key(prev_nodes[0]), get_key(new_node))) &&
+ (next_nodes[0] == nullptr || my_compare(get_key(new_node), get_key(next_nodes[0])))),
+ "Wrong elements order");
+
+ for (size_type level = 0; level < new_node->height(); ++level) {
+ __TBB_ASSERT(prev_nodes[level]->height() > level, NULL);
+ __TBB_ASSERT(prev_nodes[level]->next(level) == next_nodes[level], NULL);
+ new_node->set_next(level, next_nodes[level]);
+ prev_nodes[level]->set_next(level, new_node);
+ }
+ new_node->mark_linked();
+
+ ++my_size;
+
+ return true;
+ }
+
+ bool try_lock_nodes(size_type height, array_type& prevs, array_type& next_nodes, lock_array& locks) {
+ for (size_type l = 0; l < height; ++l) {
+ if (l == 0 || prevs[l] != prevs[l - 1])
+ locks[l] = prevs[l]->acquire();
+
+ node_ptr next = prevs[l]->next(l);
+ if ( next != next_nodes[l]) return false;
+ }
+
+ return true;
+ }
+
+ template <typename K, typename comparator>
+ const_iterator internal_get_bound(const K& key, const comparator& cmp) const {
+ node_ptr prev = dummy_head;
+ __TBB_ASSERT(dummy_head->height() > 0, NULL);
+ node_ptr next = nullptr;
+
+ for (size_type h = prev->height(); h > 0; --h) {
+ next = internal_find_position(h - 1, prev, key, cmp);
+ }
+
+ return const_iterator(next);
+ }
+
+ template <typename K, typename comparator>
+ iterator internal_get_bound(const K& key, const comparator& cmp){
+ node_ptr prev = dummy_head;
+ __TBB_ASSERT(dummy_head->height() > 0, NULL);
+ node_ptr next = nullptr;
+
+ for (size_type h = prev->height(); h > 0; --h) {
+ next = internal_find_position(h - 1, prev, key, cmp);
+ }
+
+ return iterator(next);
+ }
+
+    // Returns a pair: node_ptr to the extracted node and node_ptr to the node following the extracted one
+ std::pair<node_ptr, node_ptr> internal_extract(const_iterator it) {
+ if ( it != end() ) {
+ key_type key = traits_type::get_key(*it);
+ node_ptr prev = dummy_head;
+ __TBB_ASSERT(dummy_head->height() > 0, NULL);
+
+ array_type prev_nodes;
+ array_type next_nodes;
+
+ fill_prev_next_arrays(prev_nodes, next_nodes, prev, key, my_compare);
+
+            node_ptr erase_node = next_nodes[0];
+
+            if (erase_node && !my_compare(key, get_key(erase_node))) {
+                node_ptr next_node = erase_node->next(0); // safe to dereference: erase_node checked above
+ for(size_type level = 0; level < erase_node->height(); ++level) {
+ __TBB_ASSERT(prev_nodes[level]->height() > level, NULL);
+ __TBB_ASSERT(next_nodes[level] == erase_node, NULL);
+ prev_nodes[level]->set_next(level, erase_node->next(level));
+ }
+ --my_size;
+ return std::pair<node_ptr, node_ptr>(erase_node, next_node);
+ }
+ }
+ return std::pair<node_ptr, node_ptr>(nullptr, nullptr);
+ }
+
+protected:
+ template<typename SourceType>
+ void internal_merge(SourceType&& source) {
+ using source_type = typename std::decay<SourceType>::type;
+ using source_iterator = typename source_type::iterator;
+ __TBB_STATIC_ASSERT((std::is_same<node_type, typename source_type::node_type>::value), "Incompatible containers cannot be merged");
+
+ for(source_iterator it = source.begin(); it != source.end();) {
+ source_iterator where = it++;
+ if (allow_multimapping || !contains(traits_type::get_key(*where))) {
+ std::pair<node_ptr, node_ptr> extract_result = source.internal_extract(where);
+
+            // If the insertion fails, return the node to the source
+ node_type handle(extract_result.first);
+ __TBB_ASSERT(!handle.empty(), "Extracted handle in merge is empty");
+
+ if (!insert(std::move(handle)).second) {
+ source.insert(std::move(handle));
+ }
+ handle.deactivate();
+ }
+ }
+ }
+
+private:
+ void internal_copy(const concurrent_skip_list& other) {
+ internal_copy(other.begin(), other.end());
+ }
+
+ template<typename Iterator>
+ void internal_copy(Iterator first, Iterator last) {
+ clear();
+ try {
+ for (auto it = first; it != last; ++it)
+ insert(*it);
+ }
+ catch (...) {
+ clear();
+ delete_dummy_head();
+ throw;
+ }
+ }
+
+ /** Generate random level */
+ size_type random_level() {
+ return my_rnd_generator();
+ }
+
+ static size_type calc_node_size(size_type height) {
+ return sizeof(list_node_type) + height*sizeof(typename list_node_type::atomic_node_pointer);
+ }
+
+ /** Creates new node */
+ template <typename... Args>
+ node_ptr create_node(Args&&... args) {
+ size_type levels = random_level();
+
+ size_type sz = calc_node_size(levels);
+
+ node_ptr node = reinterpret_cast<node_ptr>(node_allocator_traits::allocate(my_node_allocator, sz));
+
+ try {
+ node_allocator_traits::construct(my_node_allocator, node, levels);
+
+ }
+ catch(...) {
+ deallocate_node(node, sz);
+ throw;
+ }
+
+ try {
+ node_allocator_traits::construct(my_node_allocator, node->storage(), std::forward<Args>(args)...);
+ }
+ catch (...) {
+ node_allocator_traits::destroy(my_node_allocator, node);
+ deallocate_node(node, sz);
+ throw;
+ }
+
+ return node;
+ }
+
+ void create_dummy_head() {
+ size_type sz = calc_node_size(MAX_LEVEL);
+
+ dummy_head = reinterpret_cast<node_ptr>(node_allocator_traits::allocate(my_node_allocator, sz));
+        // TODO: investigate the linkage failure in debug builds without this workaround
+ auto max_level = MAX_LEVEL;
+
+ try {
+ node_allocator_traits::construct(my_node_allocator, dummy_head, max_level);
+ }
+ catch(...) {
+ deallocate_node(dummy_head, sz);
+ throw;
+ }
+ }
+
+ template <bool is_dummy = false>
+ void delete_node(node_ptr node) {
+ size_type sz = calc_node_size(node->height());
+ // Destroy value
+ if (!is_dummy) node_allocator_traits::destroy(my_node_allocator, node->storage());
+ // Destroy node
+ node_allocator_traits::destroy(my_node_allocator, node);
+ // Deallocate memory
+ deallocate_node(node, sz);
+ }
+
+ void deallocate_node(node_ptr node, size_type sz) {
+ node_allocator_traits::deallocate(my_node_allocator, reinterpret_cast<uint8_t*>(node), sz);
+ }
+
+ void delete_dummy_head() {
+ delete_node<true>(dummy_head);
+ }
+
+ static iterator get_iterator(const_iterator it) {
+ return iterator(it.my_node_ptr);
+ }
+
+ void internal_move_assign(concurrent_skip_list&& other, /*POCMA=*/std::true_type) {
+ delete_dummy_head();
+ tbb::internal::allocator_move_assignment(my_node_allocator, other.my_node_allocator, std::true_type());
+ internal_move(std::move(other));
+ }
+
+ void internal_move_assign(concurrent_skip_list&& other, /*POCMA=*/std::false_type) {
+ if (my_node_allocator == other.my_node_allocator) {
+ delete_dummy_head();
+ internal_move(std::move(other));
+ } else {
+ internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()));
+ }
+ }
+
+ struct not_greater_compare {
+ const key_compare& my_less_compare;
+
+ not_greater_compare(const key_compare& less_compare) : my_less_compare(less_compare) {}
+
+ template <typename K1, typename K2>
+ bool operator()(const K1& first, const K2& second) const {
+ return !my_less_compare(second, first);
+ }
+ };
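+
+    // Note: not_greater_compare(a, b) is !(b < a), i.e. "a <= b" under a
+    // strict weak ordering. It is used for multimapping inserts and for
+    // upper_bound so the search walks past elements whose keys compare equal
+    // to the searched key.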
+
+ node_allocator_type my_node_allocator;
+ key_compare my_compare;
+ random_level_generator_type my_rnd_generator;
+ node_ptr dummy_head;
+
+ template<typename OtherTraits>
+ friend class concurrent_skip_list;
+
+ std::atomic<size_type> my_size;
+}; // class concurrent_skip_list
+
+template <size_t MAX_LEVEL>
+class concurrent_geometric_level_generator {
+public:
+ static constexpr size_t max_level = MAX_LEVEL;
+
+ concurrent_geometric_level_generator() : engines(time(NULL)) {}
+
+ size_t operator()() {
+ return (distribution(engines.local()) % MAX_LEVEL) + 1;
+ }
+
+private:
+ tbb::enumerable_thread_specific<std::mt19937_64> engines;
+ std::geometric_distribution<size_t> distribution;
+};
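+// Each thread draws levels from its own mt19937_64 engine (held in
+// enumerable_thread_specific) to avoid contention on a shared generator.
+// std::geometric_distribution with its default p = 0.5 yields value k with
+// probability 2^-(k+1); the modulo in operator() folds the tail back into the
+// range [1, MAX_LEVEL], approximating the classic skip list height
+// distribution where each extra level is half as likely.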
+
+} // namespace internal
+} // namespace interface10
+} // namespace tbb
+
+#endif // __TBB_concurrent_skip_list_H
#include "_tbb_hash_compare_impl.h"
#include "_template_helpers.h"
+#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
+#include "_node_handle_impl.h"
+#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
+
namespace tbb {
namespace interface5 {
//! @cond INTERNAL
return my_order_key;
}
+    // storage() and value() form a common interface for accessing the node's element (required by node_handle)
+ value_type* storage() {
+ return reinterpret_cast<value_type*>(&my_element);
+ }
+
+ value_type& value() {
+ return *storage();
+ }
+
// Inserts the new element in the list in an atomic fashion
nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
{
#pragma warning(pop) // warning 4127 is back
#endif
-#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
-
-template<typename Value, typename Allocator>
-class node_handle_base {
-public:
- typedef Allocator allocator_type;
-protected:
- typedef typename split_ordered_list<Value, allocator_type>::node node;
-public:
-
- node_handle_base() : my_node(NULL), my_allocator() {}
- node_handle_base(node_handle_base&& nh) : my_node(nh.my_node),
- my_allocator(std::move(nh.my_allocator)) {
- nh.my_node = NULL;
- }
-
- bool empty() const { return my_node == NULL; }
- explicit operator bool() const { return my_node != NULL; }
-
- ~node_handle_base() { internal_destroy(); }
-
- node_handle_base& operator=(node_handle_base&& nh) {
- internal_destroy();
- my_node = nh.my_node;
- typedef typename tbb::internal::allocator_traits<allocator_type>::
- propagate_on_container_move_assignment pocma_type;
- tbb::internal::allocator_move_assignment(my_allocator, nh.my_allocator, pocma_type());
- nh.deactivate();
- return *this;
- }
-
- void swap(node_handle_base& nh) {
- std::swap(my_node, nh.my_node);
- typedef typename tbb::internal::allocator_traits<allocator_type>::
- propagate_on_container_swap pocs_type;
- tbb::internal::allocator_swap(my_allocator, nh.my_allocator, pocs_type());
- }
-
- allocator_type get_allocator() const {
- return my_allocator;
- }
-
-protected:
- node_handle_base(node* n) : my_node(n) {}
-
- void internal_destroy() {
- if(my_node) {
- my_allocator.destroy(&(my_node->my_element));
- // TODO: Consider using node_allocator from the container
- typename tbb::internal::allocator_rebind<allocator_type, node>::type node_allocator;
- node_allocator.deallocate(my_node, 1);
- }
- }
-
- void deactivate() { my_node = NULL; }
-
- node* my_node;
- allocator_type my_allocator;
-};
-
-// node handle for concurrent_unordered maps
-template<typename Key, typename Value, typename Allocator>
-class node_handle : public node_handle_base<Value, Allocator> {
- typedef node_handle_base<Value, Allocator> base_type;
-public:
- typedef Key key_type;
- typedef typename Value::second_type mapped_type;
- typedef typename base_type::allocator_type allocator_type;
-
- node_handle() : base_type() {}
-
- key_type& key() const {
- __TBB_ASSERT(!this->empty(), "Cannot get key from the empty node_type object");
- return *const_cast<key_type*>(&(this->my_node->my_element.first));
- }
-
- mapped_type& mapped() const {
- __TBB_ASSERT(!this->empty(), "Cannot get mapped value from the empty node_type object");
- return this->my_node->my_element.second;
- }
-
-private:
- template<typename T, typename A>
- friend class split_ordered_list;
-
- template<typename Traits>
- friend class concurrent_unordered_base;
-
- node_handle(typename base_type::node* n) : base_type(n) {}
-};
-
-// node handle for concurrent_unordered sets
-template<typename Key, typename Allocator>
-class node_handle<Key, Key, Allocator> : public node_handle_base<Key, Allocator> {
- typedef node_handle_base<Key, Allocator> base_type;
-public:
- typedef Key value_type;
- typedef typename base_type::allocator_type allocator_type;
-
- node_handle() : base_type() {}
-
- value_type& value() const {
- __TBB_ASSERT(!this->empty(), "Cannot get value from the empty node_type object");
- return *const_cast<value_type*>(&(this->my_node->my_element));
- }
-
-private:
- template<typename T, typename A>
- friend class split_ordered_list;
-
- template<typename Traits>
- friend class concurrent_unordered_base;
-
- node_handle(typename base_type::node* n) : base_type(n) {}
-};
-
-#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
-
} // namespace internal
//! @endcond
} // namespace interface5
Output operator()( const Input & ) const { return Output(); }
};
+template<typename T, typename DecrementType, typename DummyType = void>
+class decrementer;
+
+template<typename T, typename DecrementType>
+class decrementer<T, DecrementType,
+ typename tbb::internal::enable_if<
+ tbb::internal::is_integral<DecrementType>::value, void>::type
+ > : public receiver<DecrementType>, tbb::internal::no_copy {
+ T* my_node;
+protected:
+
+ task* try_put_task( const DecrementType& value ) __TBB_override {
+ task* result = my_node->decrement_counter( value );
+ if( !result )
+ result = SUCCESSFULLY_ENQUEUED;
+ return result;
+ }
+
+ graph& graph_reference() __TBB_override {
+ return my_node->my_graph;
+ }
+
+ template<typename U, typename V> friend class tbb::flow::interface11::limiter_node;
+ void reset_receiver( reset_flags f ) __TBB_override {
+#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
+ if (f & rf_clear_edges)
+ my_built_predecessors.clear();
+#else
+ tbb::internal::suppress_unused_warning( f );
+#endif
+ }
+
+public:
+    // Since the decrementer does not use the (possibly not yet constructed) owner
+    // inside its own constructor, my_node could be initialized directly with the 'this'
+    // pointer passed from the owner, which would make the set_owner() method unnecessary.
+ decrementer() : my_node(NULL) {}
+ void set_owner( T *node ) { my_node = node; }
+
+#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
+ spin_mutex my_mutex;
+ //! The predecessor type for this node
+ typedef typename receiver<DecrementType>::predecessor_type predecessor_type;
+
+ typedef internal::edge_container<predecessor_type> built_predecessors_type;
+ typedef typename built_predecessors_type::edge_list_type predecessor_list_type;
+ built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; }
+
+ void internal_add_built_predecessor( predecessor_type &s) __TBB_override {
+ spin_mutex::scoped_lock l(my_mutex);
+ my_built_predecessors.add_edge( s );
+ }
+
+ void internal_delete_built_predecessor( predecessor_type &s) __TBB_override {
+ spin_mutex::scoped_lock l(my_mutex);
+ my_built_predecessors.delete_edge(s);
+ }
+
+ void copy_predecessors( predecessor_list_type &v) __TBB_override {
+ spin_mutex::scoped_lock l(my_mutex);
+ my_built_predecessors.copy_edges(v);
+ }
+
+ size_t predecessor_count() __TBB_override {
+ spin_mutex::scoped_lock l(my_mutex);
+ return my_built_predecessors.edge_count();
+ }
+protected:
+ built_predecessors_type my_built_predecessors;
+#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */
+};
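+// Usage sketch (illustrative only): with this specialization the decrement
+// port of a limiter_node can receive integral messages, each decrementing the
+// internal counter by the received value, e.g.
+//     tbb::flow::graph g;
+//     tbb::flow::limiter_node<int, int> limiter(g, /*threshold=*/4);
+//     limiter.decrement.try_put(2);  // frees capacity for two more messages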
+
template<typename T>
-class decrementer : public continue_receiver, tbb::internal::no_copy {
+class decrementer<T, continue_msg, void> : public continue_receiver, tbb::internal::no_copy {
T *my_node;
task *execute() __TBB_override {
- return my_node->decrement_counter();
+ return my_node->decrement_counter( 1 );
}
protected:
return last_task;
}
+    // Calls try_put_task on each successor, collects the returned tasks in 'tasks',
+    // and reports whether at least one put succeeded
+#if __TBB_PREVIEW_ASYNC_MSG
+ template<typename X>
+ bool gather_successful_try_puts( const X &t, task_list &tasks ) {
+#else
+ bool gather_successful_try_puts( const T &t, task_list &tasks ) {
+#endif // __TBB_PREVIEW_ASYNC_MSG
+ bool upgraded = true;
+ bool is_at_least_one_put_successful = false;
+ typename mutex_type::scoped_lock l(this->my_mutex, upgraded);
+ typename successors_type::iterator i = this->my_successors.begin();
+ while ( i != this->my_successors.end() ) {
+ task * new_task = (*i)->try_put_task(t);
+ if(new_task) {
+ ++i;
+ if(new_task != SUCCESSFULLY_ENQUEUED) {
+ tasks.push_back(*new_task);
+ }
+ is_at_least_one_put_successful = true;
+ }
+ else { // failed
+ if ( (*i)->register_predecessor(*this->my_owner) ) {
+ if (!upgraded) {
+ l.upgrade_to_writer();
+ upgraded = true;
+ }
+ i = this->my_successors.erase(i);
+ } else {
+ ++i;
+ }
+ }
+ }
+ return is_at_least_one_put_successful;
+ }
};
//! A cache of successors that are put in a round-robin fashion
#define __TBB_FLOW_GRAPH_PRIORITY_ARG1( arg1, priority ) arg1
#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES
+#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
+#define __TBB_DEPRECATED_LIMITER_EXPR( expr ) expr
+#define __TBB_DEPRECATED_LIMITER_ARG2( arg1, arg2 ) arg1, arg2
+#define __TBB_DEPRECATED_LIMITER_ARG4( arg1, arg2, arg3, arg4 ) arg1, arg3, arg4
+#else
+#define __TBB_DEPRECATED_LIMITER_EXPR( expr )
+#define __TBB_DEPRECATED_LIMITER_ARG2( arg1, arg2 ) arg1
+#define __TBB_DEPRECATED_LIMITER_ARG4( arg1, arg2, arg3, arg4 ) arg1, arg2
+#endif // TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
+
namespace tbb {
namespace flow {
void activate_graph(graph& g);
void deactivate_graph(graph& g);
bool is_graph_active(graph& g);
+tbb::task& prioritize_task(graph& g, tbb::task& arena_task);
void spawn_in_graph_arena(graph& g, tbb::task& arena_task);
+void enqueue_in_graph_arena(graph &g, tbb::task& arena_task);
void add_task_to_graph_reset_list(graph& g, tbb::task *tp);
-template<typename F> void execute_in_graph_arena(graph& g, F& f);
#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES
struct graph_task_comparator {
friend void internal::activate_graph(graph& g);
friend void internal::deactivate_graph(graph& g);
friend bool internal::is_graph_active(graph& g);
+ friend tbb::task& internal::prioritize_task(graph& g, tbb::task& arena_task);
friend void internal::spawn_in_graph_arena(graph& g, tbb::task& arena_task);
+ friend void internal::enqueue_in_graph_arena(graph &g, tbb::task& arena_task);
friend void internal::add_task_to_graph_reset_list(graph& g, tbb::task *tp);
- template<typename F> friend void internal::execute_in_graph_arena(graph& g, F& f);
-#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES
- template<typename Input, typename Output, typename Policy, typename Allocator>
- friend class async_node;
-#endif
friend class tbb::interface7::internal::task_arena_base;
return g.my_is_active;
}
-//! Executes custom functor inside graph arena
-template<typename F>
-inline void execute_in_graph_arena(graph& g, F& f) {
- if (is_graph_active(g)) {
- __TBB_ASSERT(g.my_task_arena && g.my_task_arena->is_active(), NULL);
- g.my_task_arena->execute(f);
- }
-}
-
-//! Spawns a task inside graph arena
-inline void spawn_in_graph_arena(graph& g, tbb::task& arena_task) {
- task* task_to_spawn = &arena_task;
#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES
+inline tbb::task& prioritize_task(graph& g, tbb::task& t) {
+ task* critical_task = &t;
// TODO: change flow graph's interfaces to work with graph_task type instead of tbb::task.
- graph_task* t = static_cast<graph_task*>(&arena_task);
- if( t->priority != no_priority ) {
+ graph_task* gt = static_cast<graph_task*>(&t);
+ if( gt->priority != no_priority ) {
//! Non-preemptive priority pattern. The original task is submitted as a work item to the
//! priority queue, and a new critical task is created to take and execute a work item with
//! the highest known priority. The reference counting responsibility is transferred (via
//! allocate_continuation) to the new task.
- task_to_spawn = new( t->allocate_continuation() ) priority_task_selector(g.my_priority_queue);
- tbb::internal::make_critical( *task_to_spawn );
- g.my_priority_queue.push(t);
+ critical_task = new( gt->allocate_continuation() ) priority_task_selector(g.my_priority_queue);
+ tbb::internal::make_critical( *critical_task );
+ g.my_priority_queue.push(gt);
}
+ return *critical_task;
+}
+#else
+inline tbb::task& prioritize_task(graph&, tbb::task& t) {
+ return t;
+}
#endif /* __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES */
- graph::spawn_functor s_fn(*task_to_spawn);
- execute_in_graph_arena(g, s_fn);
+
+//! Spawns a task inside graph arena
+inline void spawn_in_graph_arena(graph& g, tbb::task& arena_task) {
+ if (is_graph_active(g)) {
+ graph::spawn_functor s_fn(prioritize_task(g, arena_task));
+ __TBB_ASSERT(g.my_task_arena && g.my_task_arena->is_active(), NULL);
+ g.my_task_arena->execute(s_fn);
+ }
+}
+
+//! Enqueues a task inside graph arena
+inline void enqueue_in_graph_arena(graph &g, tbb::task& arena_task) {
+ if (is_graph_active(g)) {
+ __TBB_ASSERT( g.my_task_arena && g.my_task_arena->is_active(), "Is graph's arena initialized and active?" );
+ task::enqueue(prioritize_task(g, arena_task), *g.my_task_arena);
+ }
}
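+// Note: unlike spawn_in_graph_arena, whose spawn functor is executed inside
+// the arena via execute() and so may run work on the calling thread,
+// enqueue_in_graph_arena submits the task with task::enqueue, so it is picked
+// up asynchronously by an arena worker rather than by the enqueuing thread.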
inline void add_task_to_graph_reset_list(graph& g, tbb::task *tp) {
, add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy
#endif
};
- enum op_stat {WAIT=0, SUCCEEDED, FAILED};
typedef reserving_port<T> class_type;
class reserving_port_operation : public aggregated_operation<reserving_port_operation> {
, add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy
#endif
};
- enum op_stat {WAIT=0, SUCCEEDED, FAILED};
class queueing_port_operation : public aggregated_operation<queueing_port_operation> {
public:
, add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy
#endif
};
- enum op_stat {WAIT=0, SUCCEEDED, FAILED};
class key_matching_port_operation : public aggregated_operation<key_matching_port_operation> {
public:
// and the output_buffer_type base class
private:
enum op_type { res_count, inc_count, may_succeed, try_make };
- enum op_stat {WAIT=0, SUCCEEDED, FAILED};
typedef join_node_FE<key_matching<key_type,key_hash_compare>, InputTuple, OutputTuple> class_type;
class key_matching_FE_operation : public aggregated_operation<key_matching_FE_operation> {
, add_blt_succ, del_blt_succ, blt_succ_cnt, blt_succ_cpy
#endif
};
- enum op_stat {WAIT=0, SUCCEEDED, FAILED};
typedef join_node_base<JP,InputTuple,OutputTuple> class_type;
class join_node_base_operation : public aggregated_operation<join_node_base_operation> {
typedef KHashp KHash;
};
-// wrap each element of a tuple in a template, and make a tuple of the result.
+ // wrap each element of a tuple in a template, and make a tuple of the result.
template<int N, template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements;
template<int N, template<class> class PT, typename KeyTraits, typename TypeTuple>
struct wrap_key_tuple_elements;
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_VARIADIC_TUPLE_PRESENT
+ template<int N, template<class> class PT, typename... Args>
+ struct wrap_tuple_elements<N, PT, tbb::flow::tuple<Args...> >{
+ typedef typename tbb::flow::tuple<PT<Args>... > type;
+ };
+
+ template<int N, template<class> class PT, typename KeyTraits, typename... Args>
+ struct wrap_key_tuple_elements<N, PT, KeyTraits, tbb::flow::tuple<Args...> > {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef typename tbb::flow::tuple<PT<KeyTrait<K, KHash, Args> >... > type;
+ };
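+
+    // For example, wrap_tuple_elements<2, PT, tuple<int, float> >::type is
+    // tuple<PT<int>, PT<float> >. N is unused by the variadic form and is kept
+    // only for interface parity with the enumerated specializations below.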
+#else
template<template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements<1, PT, TypeTuple> {
typedef typename tbb::flow::tuple<
PT<KeyTrait9> > type;
};
#endif
+#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_VARIADIC_TUPLE_PRESENT */
#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
template< int... S > class sequence {};
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef __TBB_node_handle_H
+#define __TBB_node_handle_H
+
+#include "_allocator_traits.h"
+#include "../tbb_config.h"
+
+
+namespace tbb {
+
+// These classes must be declared here so that the friend declarations below resolve correctly
+// TODO: Consider creating an internal accessor class so node_handle private fields can be reached without friend declarations
+namespace interface5 {
+namespace internal {
+ template <typename T, typename Allocator>
+ class split_ordered_list;
+ template <typename Traits>
+ class concurrent_unordered_base;
+}
+}
+
+namespace interface10 {
+namespace internal {
+ template<typename Traits>
+ class concurrent_skip_list;
+}
+}
+
+namespace internal {
+
+template<typename Value, typename Node, typename Allocator>
+class node_handle_base {
+public:
+ typedef Allocator allocator_type;
+protected:
+ typedef Node node;
+ typedef tbb::internal::allocator_traits<allocator_type> traits_type;
+public:
+
+ node_handle_base() : my_node(NULL), my_allocator() {}
+ node_handle_base(node_handle_base&& nh) : my_node(nh.my_node),
+ my_allocator(std::move(nh.my_allocator)) {
+ nh.my_node = NULL;
+ }
+
+ bool empty() const { return my_node == NULL; }
+ explicit operator bool() const { return my_node != NULL; }
+
+ ~node_handle_base() { internal_destroy(); }
+
+ node_handle_base& operator=(node_handle_base&& nh) {
+ internal_destroy();
+ my_node = nh.my_node;
+ typedef typename traits_type::propagate_on_container_move_assignment pocma_type;
+ tbb::internal::allocator_move_assignment(my_allocator, nh.my_allocator, pocma_type());
+ nh.deactivate();
+ return *this;
+ }
+
+ void swap(node_handle_base& nh) {
+ std::swap(my_node, nh.my_node);
+ typedef typename traits_type::propagate_on_container_swap pocs_type;
+ tbb::internal::allocator_swap(my_allocator, nh.my_allocator, pocs_type());
+ }
+
+ allocator_type get_allocator() const {
+ return my_allocator;
+ }
+
+protected:
+ node_handle_base(node* n) : my_node(n) {}
+
+ void internal_destroy() {
+ if(my_node) {
+ traits_type::destroy(my_allocator, my_node->storage());
+ typename tbb::internal::allocator_rebind<allocator_type, node>::type node_allocator;
+ node_allocator.deallocate(my_node, 1);
+ }
+ }
+
+ void deactivate() { my_node = NULL; }
+
+ node* my_node;
+ allocator_type my_allocator;
+};
+
+// node handle for maps
+template<typename Key, typename Value, typename Node, typename Allocator>
+class node_handle : public node_handle_base<Value, Node, Allocator> {
+ typedef node_handle_base<Value, Node, Allocator> base_type;
+public:
+ typedef Key key_type;
+ typedef typename Value::second_type mapped_type;
+ typedef typename base_type::allocator_type allocator_type;
+
+ node_handle() : base_type() {}
+
+ key_type& key() const {
+ __TBB_ASSERT(!this->empty(), "Cannot get key from the empty node_type object");
+ return *const_cast<key_type*>(&(this->my_node->value().first));
+ }
+
+ mapped_type& mapped() const {
+ __TBB_ASSERT(!this->empty(), "Cannot get mapped value from the empty node_type object");
+ return this->my_node->value().second;
+ }
+
+private:
+ template<typename T, typename A>
+ friend class tbb::interface5::internal::split_ordered_list;
+
+ template<typename Traits>
+ friend class tbb::interface5::internal::concurrent_unordered_base;
+
+ template<typename Traits>
+ friend class tbb::interface10::internal::concurrent_skip_list;
+
+ node_handle(typename base_type::node* n) : base_type(n) {}
+};
+
+// node handle for sets
+template<typename Key, typename Node, typename Allocator>
+class node_handle<Key, Key, Node, Allocator> : public node_handle_base<Key, Node, Allocator> {
+ typedef node_handle_base<Key, Node, Allocator> base_type;
+public:
+ typedef Key value_type;
+ typedef typename base_type::allocator_type allocator_type;
+
+ node_handle() : base_type() {}
+
+ value_type& value() const {
+ __TBB_ASSERT(!this->empty(), "Cannot get value from the empty node_type object");
+ return *const_cast<value_type*>(&(this->my_node->value()));
+ }
+
+private:
+ template<typename T, typename A>
+ friend class tbb::interface5::internal::split_ordered_list;
+
+ template<typename Traits>
+ friend class tbb::interface5::internal::concurrent_unordered_base;
+
+ template<typename Traits>
+ friend class tbb::interface10::internal::concurrent_skip_list;
+
+ node_handle(typename base_type::node* n) : base_type(n) {}
+};
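+
+// Usage sketch (illustrative only): node handles let elements move between
+// compatible containers without copying, e.g. with the unsafe_extract/insert
+// interface the containers above expose:
+//     auto nh = source_container.unsafe_extract(key);
+//     if (!nh.empty())
+//         target_container.insert(std::move(nh));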
+
+
+}// namespace internal
+}// namespace tbb
+
+#endif /*__TBB_node_handle_H*/
template<typename T> struct is_ref { static const bool value = false; };
template<typename U> struct is_ref<U&> { static const bool value = true; };
+//! Partial support for std::is_integral
+template<typename T> struct is_integral_impl { static const bool value = false; };
+template<> struct is_integral_impl<bool> { static const bool value = true; };
+template<> struct is_integral_impl<char> { static const bool value = true; };
+#if __TBB_CPP11_PRESENT
+template<> struct is_integral_impl<char16_t> { static const bool value = true; };
+template<> struct is_integral_impl<char32_t> { static const bool value = true; };
+#endif
+template<> struct is_integral_impl<wchar_t> { static const bool value = true; };
+template<> struct is_integral_impl<short> { static const bool value = true; };
+template<> struct is_integral_impl<int> { static const bool value = true; };
+template<> struct is_integral_impl<long> { static const bool value = true; };
+template<> struct is_integral_impl<long long> { static const bool value = true; };
+
+template<typename T>
+struct is_integral : is_integral_impl<typename strip<T>::type> {};
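+// e.g. is_integral<const long&>::value is true, assuming strip removes
+// cv-qualifiers and references before the lookup. Unsigned integer types are
+// not listed above, hence the "partial" in the comment.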
+
#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
//! std::void_t internal implementation (to avoid GCC < 4.7 "template aliases" absence)
template<typename...> struct void_t { typedef void type; };
template< std::size_t N, typename... Args >
using pack_element_t = typename pack_element<N, Args...>::type;
+template <typename Comp> using is_transparent = typename Comp::is_transparent;
+
+template <typename Comp>
+using has_is_transparent = supports<Comp, is_transparent>;
+
#endif /* __TBB_CPP11_PRESENT */
} } // namespace internal, namespace tbb
bool operator==(const zip_iterator& it) const {
return *this - it == 0;
}
+ it_types base() const { return my_it; }
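+    // e.g. auto iters = it.base(); std::get<0>(iters) then recovers the first
+    // of the underlying iterators.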
+
bool operator!=(const zip_iterator& it) const { return !(*this == it); }
bool operator<(const zip_iterator& it) const { return *this - it < 0; }
bool operator>(const zip_iterator& it) const { return it < *this; }
TBBMALLOC_USE_HUGE_PAGES, /* value turns using huge pages on and off */
/* deprecated, kept for backward compatibility only */
USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES,
- /* try to limit memory consumption value Bytes, clean internal buffers
+ /* try to limit memory consumption value (Bytes), clean internal buffers
       if limit is exceeded, but does not prevent requesting more memory from the OS */
- TBBMALLOC_SET_SOFT_HEAP_LIMIT
+ TBBMALLOC_SET_SOFT_HEAP_LIMIT,
+    /* Lower bound for the size (Bytes) that is interpreted as huge
+     * and not released during regular cleanup operations. */
+ TBBMALLOC_SET_HUGE_SIZE_THRESHOLD
} AllocationModeParam;
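+/* Usage sketch (illustrative only):
+     scalable_allocation_mode(TBBMALLOC_SET_HUGE_SIZE_THRESHOLD, 4*1024*1024);
+   treats allocations of 4 MB and above as huge, so regular cleanup does not
+   release them back to the OS. */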
/** Set TBB allocator-specific allocation modes.
//! Destroys the list, but does not destroy the task objects.
~task_list() {}
- //! True if list if empty; false otherwise.
+ //! True if list is empty; false otherwise.
bool empty() const {return !first;}
//! Push task onto back of list.
#include "concurrent_queue.h"
#include "concurrent_unordered_map.h"
#include "concurrent_unordered_set.h"
+#if TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS
+#include "concurrent_map.h"
+#include "concurrent_set.h"
+#endif
#include "concurrent_vector.h"
#include "critical_section.h"
#include "enumerable_thread_specific.h"
#define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1210)
#define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L)
#define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L)
- #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__INTEL_COMPILER > 1901) // a future version
+ #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__INTEL_COMPILER > 1910) // a future version
#define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L)
#elif __clang__
/** TODO: these options need to be rechecked **/
/** Internal TBB features & modes **/
+/** __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT indicates that all conditions
+ * for using concurrent_map and concurrent_set are met. **/
+// TODO: Add cpp11 random generation macro
+#ifndef __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+ #define __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT ( __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
+ && __TBB_IMPLICIT_MOVE_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT && __TBB_CPP11_ARRAY_PRESENT \
+ && __TBB_INITIALIZER_LISTS_PRESENT )
+#endif
+
/** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak symbol mechanism **/
#ifndef __TBB_WEAK_SYMBOLS_PRESENT
#define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) )
#define TBB_VERSION_MINOR 0
// Engineering-focused interface version
-#define TBB_INTERFACE_VERSION 11006
+#define TBB_INTERFACE_VERSION 11007
#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000
// The oldest major interface version still supported
<H2>Intel TBB binary package</H2>
<H3>Directories</H3>
<DL>
-<DT><A HREF="doc/html/index.html">doc</A>
-<DD>Documentation for the library.
<DT><A HREF="bin">bin</A>
<DD>Start-up scripts for sourcing library for Linux* OS and macOS*. For Windows* OS: start-up scripts and dynamic-link libraries.
<DT><A HREF="lib">lib</A>
# limitations under the License.
tbb_root?=..
+BUILDING_PHASE:=0
include $(tbb_root)/build/common.inc
.PHONY: all release test install test-install
all: install test
clean:
- $(PY_SETUP) clean -b$(CURDIR)
+ $(PY_SETUP) clean -b$(work_dir)_release
release: CC=$(compiler)
release: $(SRC) rml
- $(PY_SETUP) build -b$(CURDIR) -f check
+ $(PY_SETUP) build -b$(work_dir)_release -f check
install: CC=$(compiler)
install: $(SRC) rml
- $(PY_SETUP) build -b$(CURDIR) install
+ $(PY_SETUP) build -b$(work_dir)_release build_ext -f -I$(tbb_root)/include -L$(work_dir)_release install -f
test:
python -m tbb test
<DD>Alternative entry point for Python module.
</DL>
-<A NAME=build><H2>Build and install</H2></A>
+<A NAME=build><H2>Build and install (source package only)</H2></A>
For accessing targets defined in python/Makefile, please use
<A HREF="../src/index.html">src/Makefile</A>
instead and build runtime libraries before working with Python.
typedef versioned_object::version_type version_type;
-extern "C" factory::status_type __RML_open_factory(factory& f, version_type& server_version, version_type client_version) {
+extern "C" factory::status_type __RML_open_factory(factory& f, version_type& /*server_version*/, version_type /*client_version*/) {
if( !tbb::internal::rml::get_enable_flag( IPC_ENABLE_VAR_NAME ) ) {
return factory::st_incompatible;
}
return factory::st_success;
}
-extern "C" void __RML_close_factory(factory& f) {
+extern "C" void __RML_close_factory(factory& /*f*/) {
}
class ipc_thread_monitor : public thread_monitor {
#endif /* USE_PTHREAD */
-extern "C" tbb_factory::status_type __TBB_make_rml_server(tbb_factory& f, tbb_server*& server, tbb_client& client) {
+extern "C" tbb_factory::status_type __TBB_make_rml_server(tbb_factory& /*f*/, tbb_server*& server, tbb_client& client) {
server = new( tbb::cache_aligned_allocator<ipc_server>().allocate(1) ) ipc_server(client);
#if USE_PTHREAD
my_global_client = &client;
return tbb_factory::st_success;
}
-extern "C" void __TBB_call_with_my_server_info(::rml::server_info_callback_t cb, void* arg) {
+extern "C" void __TBB_call_with_my_server_info(::rml::server_info_callback_t /*cb*/, void* /*arg*/) {
}
} // namespace rml
global is_active
assert is_active == False, "tbb.Monkey does not support nesting yet"
is_active = True
- self.env = os.getenv('MKL_THREADING_LAYER')
+ self.env_mkl = os.getenv('MKL_THREADING_LAYER')
os.environ['MKL_THREADING_LAYER'] = 'TBB'
+ self.env_numba = os.getenv('NUMBA_THREADING_LAYER')
+ os.environ['NUMBA_THREADING_LAYER'] = 'TBB'
if ipc_enabled:
if sys.version_info.major == 2 and sys.version_info.minor >= 7:
global is_active
assert is_active == True, "modified?"
is_active = False
- if self.env is None:
+ if self.env_mkl is None:
del os.environ['MKL_THREADING_LAYER']
else:
- os.environ['MKL_THREADING_LAYER'] = self.env
+ os.environ['MKL_THREADING_LAYER'] = self.env_mkl
+ if self.env_numba is None:
+ del os.environ['NUMBA_THREADING_LAYER']
+ else:
+ os.environ['NUMBA_THREADING_LAYER'] = self.env_numba
for name in self._items.keys():
setattr(self._modules[name], name, self._items[name])
The parameters are chosen so that CPU and ASYNC work take approximately the same time.
*/
+#define TBB_PREVIEW_FLOW_GRAPH_FEATURES __TBB_CPF_BUILD
+
#include "tbb/task_scheduler_init.h"
#include "tbb/parallel_for.h"
#include "tbb/concurrent_queue.h"
if (oldRegion->type != MEMREG_ONE_BLOCK)
return NULL; // we are not single in the region
const size_t userOffset = (uintptr_t)ptr - (uintptr_t)oldRegion;
+ const size_t alignedSize = LargeObjectCache::alignToBin(newSize);
const size_t requestSize =
- alignUp(userOffset + newSize + sizeof(LastFreeBlock), extMemPool->granularity);
- if (requestSize < newSize) // is wrapped around?
+ alignUp(userOffset + alignedSize + sizeof(LastFreeBlock), extMemPool->granularity);
+ if (requestSize < alignedSize) // is wrapped around?
return NULL;
regionList.remove(oldRegion);
MemRegion *region = (MemRegion*)ret;
MALLOC_ASSERT(region->type == MEMREG_ONE_BLOCK, ASSERT_TEXT);
region->allocSz = requestSize;
+ region->blockSz = alignedSize;
FreeBlock *fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion),
largeObjectAlignment);
- // put LastFreeBlock at the very end of region
- const uintptr_t fBlockEnd = (uintptr_t)region + requestSize - sizeof(LastFreeBlock);
- region->blockSz = fBlockEnd - (uintptr_t)fBlock;
regionList.add(region);
startUseBlock(region, fBlock, /*addToBin=*/false);
limitations under the License.
*/
+#ifndef __TBB_tbbmalloc_internal_H
+ #error tbbmalloc_internal.h must be included at this point
+#endif
+
#ifndef __TBB_backend_H
#define __TBB_backend_H
VALID_BLOCK_IN_BIN = 1 // valid block added to bin, not returned as result
};
public:
- static const int freeBinsNum =
- (maxBinned_HugePage-minBinnedSize)/LargeObjectCache::largeBlockCacheStep + 1;
+ // Backend bins step is the same as CacheStep for large object cache
+ static const size_t freeBinsStep = LargeObjectCache::LargeBSProps::CacheStep;
+ static const unsigned freeBinsNum = (maxBinned_HugePage-minBinnedSize)/freeBinsStep + 1;
// if previous access missed per-thread slabs pool,
// allocate numOfSlabAllocOnMiss blocks in advance
else if (size < minBinnedSize)
return NO_BIN;
- int bin = (size - minBinnedSize)/LargeObjectCache::largeBlockCacheStep;
+ int bin = (size - minBinnedSize)/freeBinsStep;
MALLOC_ASSERT(bin < HUGE_BIN, "Invalid size.");
return bin;
static size_t binToSize(int bin) {
MALLOC_ASSERT(bin <= HUGE_BIN, "Invalid bin.");
- return bin*LargeObjectCache::largeBlockCacheStep + minBinnedSize;
+ return bin*freeBinsStep + minBinnedSize;
}
#endif
};
inline FreeObject* allocate();
inline FreeObject *allocateFromFreeList();
- inline void adjustFullness();
+ inline bool adjustFullness();
void adjustPositionInBin(Bin* bin = NULL);
bool freeListNonNull() { return freeList; }
return released;
}
-void Block::adjustFullness()
+bool Block::adjustFullness()
{
- const float threshold = (slabSize - sizeof(Block)) * (1 - emptyEnoughRatio);
-
if (bumpPtr) {
/* If we are still using a bump ptr for this block it is empty enough to use. */
STAT_increment(getThreadId(), getIndex(objectSize), examineEmptyEnough);
isFull = false;
- return;
- }
-
- /* allocatedCount shows how many objects in the block are in use; however it still counts
- * blocks freed by other threads; so prior call to privatizePublicFreeList() is recommended */
- isFull = (allocatedCount*objectSize > threshold) ? true : false;
+ } else {
+ const float threshold = (slabSize - sizeof(Block)) * (1 - emptyEnoughRatio);
+ /* allocatedCount shows how many objects in the block are in use; however it still counts
+ * blocks freed by other threads; so prior call to privatizePublicFreeList() is recommended */
+ isFull = (allocatedCount*objectSize > threshold) ? true : false;
#if COLLECT_STATISTICS
- if (isFull)
- STAT_increment(getThreadId(), getIndex(objectSize), examineNotEmpty);
- else
- STAT_increment(getThreadId(), getIndex(objectSize), examineEmptyEnough);
+ if (isFull)
+ STAT_increment(getThreadId(), getIndex(objectSize), examineNotEmpty);
+ else
+ STAT_increment(getThreadId(), getIndex(objectSize), examineEmptyEnough);
#endif
+ }
+ return isFull;
}
// This method resides in class Block, and not in class Bin, in order to avoid
// calling getAllocationBin on a reasonably hot path in Block::freeOwnObject
void Block::adjustPositionInBin(Bin* bin/*=NULL*/)
{
- bool fullBefore = isFull;
- adjustFullness();
- if (fullBefore && !isFull) {
+    // If the block was full but became empty enough to use,
+ // move it to the front of the list
+ if (isFull && !adjustFullness()) {
if (!bin)
bin = tlsPtr->getAllocationBin(objectSize);
bin->moveBlockToFront(this);
delivers a clean result. */
static char VersionString[] = "\0" TBBMALLOC_VERSION_STRINGS;
-void AllocControlledMode::initReadEnv(const char *envName, intptr_t defaultVal)
-{
- if (!setDone) {
-#if !__TBB_WIN8UI_SUPPORT
- // TODO: use strtol to get the actual value of the envirable
- const char *envVal = getenv(envName);
- if (envVal && !strcmp(envVal, "1"))
- val = 1;
- else
-#endif
- val = defaultVal;
- setDone = true;
- }
-}
-
#if USE_PTHREAD && (__TBB_SOURCE_DIRECTLY_INCLUDED || __TBB_USE_DLOPEN_REENTRANCY_WORKAROUND)
/* Decrease race interval between dynamic library unloading and pthread key
return isMallocInitialized();
}
-/** Caller is responsible for ensuring this routine is called exactly once. */
+/* Caller is responsible for ensuring this routine is called exactly once. */
extern "C" void MallocInitializeITT() {
#if DO_ITT_NOTIFY
if (!usedBySrcIncluded)
return TBBMALLOC_INVALID_PARAM;
}
#endif
+ } else if (param == TBBMALLOC_SET_HUGE_SIZE_THRESHOLD) {
+ defaultMemPool->extMemPool.loc.setHugeSizeThreshold((size_t)value);
+ return TBBMALLOC_OK;
}
return TBBMALLOC_INVALID_PARAM;
}
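A usage sketch for the new parameter (the 1GB value is illustrative): the knob is set through scalable_allocation_mode() from tbb/scalable_allocator.h, which returns TBBMALLOC_OK when the value is accepted.

    #include "tbb/scalable_allocator.h"

    // Sketch: keep blocks of 1GB and above cached in the huge-object cache
    // instead of releasing them back to the OS until an explicit cleanup.
    int rc = scalable_allocation_mode(TBBMALLOC_SET_HUGE_SIZE_THRESHOLD,
                                      (intptr_t)1024 * 1024 * 1024);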
switch(cmd) {
case TBBMALLOC_CLEAN_THREAD_BUFFERS:
if (TLSData *tls = defaultMemPool->getTLS(/*create=*/false))
- released = tls->externalCleanup(/*cleanOnlyUsed*/false, /*cleanBins=*/true);
+ released = tls->externalCleanup(/*cleanOnlyUnused*/false, /*cleanBins=*/true);
break;
case TBBMALLOC_CLEAN_ALL_BUFFERS:
released = defaultMemPool->extMemPool.hardCachesCleanup();
*/
#include "tbbmalloc_internal.h"
+#include "tbb/tbb_environment.h"
-/********* Allocation of large objects ************/
-
+/******************************* Allocation of large objects *********************************************/
namespace rml {
namespace internal {
+/* ---------------------------- Large Object cache init section ---------------------------------------- */
+
+void LargeObjectCache::init(ExtMemoryPool *memPool)
+{
+ extMemPool = memPool;
+    // scalable_allocation_mode can be called before allocator initialization; respect such a manual request
+ if (hugeSizeThreshold == 0) {
+        // Initialize the huge size threshold from the environment variable, if it was set
+ long requestedThreshold = tbb::internal::GetIntegralEnvironmentVariable("TBB_MALLOC_SET_HUGE_SIZE_THRESHOLD");
+        // Use a valid value from the environment, or default to the maximum possible value
+ if (requestedThreshold != -1) {
+ setHugeSizeThreshold(requestedThreshold);
+ } else {
+ setHugeSizeThreshold(maxHugeSize);
+ }
+ }
+}
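The same threshold can be supplied through the environment before the first allocation, which is what the GetIntegralEnvironmentVariable() call above picks up (value in bytes; an unset variable reads as -1 and falls back to maxHugeSize):

    TBB_MALLOC_SET_HUGE_SIZE_THRESHOLD=1073741824 ./my_app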
+
+/* ----------------------------- Huge size threshold settings ----------------------------------------- */
+
+void LargeObjectCache::setHugeSizeThreshold(size_t value)
+{
+ // Valid in the huge cache range: [MaxLargeSize, MaxHugeSize].
+ if (value <= maxHugeSize) {
+ hugeSizeThreshold = value >= maxLargeSize ? alignToBin(value) : maxLargeSize;
+
+ // Calculate local indexes for the global threshold size (for fast search inside a regular cleanup)
+ largeCache.hugeSizeThresholdIdx = LargeCacheType::numBins;
+ hugeCache.hugeSizeThresholdIdx = HugeCacheType::sizeToIdx(hugeSizeThreshold);
+ }
+}
+
+bool LargeObjectCache::sizeInCacheRange(size_t size)
+{
+ return size <= maxHugeSize && (size <= defaultMaxHugeSize || size >= hugeSizeThreshold);
+}
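Once a user raises the threshold above defaultMaxHugeSize, this predicate splits cacheable sizes into two intervals with a sieved gap in between. A standalone sketch of the logic with illustrative constants (64-bit size_t assumed; this is not the class's actual interface):

    #include <cassert>
    #include <cstddef>

    // Mirror of the sieve predicate, with the fields passed as parameters.
    static bool inCacheRange(std::size_t size, std::size_t maxHuge,
                             std::size_t defMaxHuge, std::size_t threshold) {
        return size <= maxHuge && (size <= defMaxHuge || size >= threshold);
    }

    int main() {
        const std::size_t MB = 1024 * 1024, TB = MB * MB;
        // defaultMaxHugeSize = 64MB, user threshold raised to 1GB:
        assert( inCacheRange(  32 * MB, TB, 64 * MB, 1024 * MB)); // always cached
        assert(!inCacheRange( 128 * MB, TB, 64 * MB, 1024 * MB)); // sieved to the backend
        assert( inCacheRange(2048 * MB, TB, 64 * MB, 1024 * MB)); // kept: above the threshold
        return 0;
    }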
+
+/* ----------------------------------------------------------------------------------------------------- */
/* The functor called by the aggregator for the operation list */
template<typename Props>
uintptr_t getCurrTime() const { return currTime; }
};
-// ---------------- Cache Bin Aggregator Operation Helpers ---------------- //
-// The list of possible operations.
-enum CacheBinOperationType {
- CBOP_INVALID = 0,
- CBOP_GET,
- CBOP_PUT_LIST,
- CBOP_CLEAN_TO_THRESHOLD,
- CBOP_CLEAN_ALL,
- CBOP_UPDATE_USED_SIZE
-};
-
-// The operation status list. CBST_NOWAIT can be specified for non-blocking operations.
-enum CacheBinOperationStatus {
- CBST_WAIT = 0,
- CBST_NOWAIT,
- CBST_DONE
-};
+/* ---------------- Cache Bin Aggregator Operation Helpers ---------------- */
// The list of structures which describe the operation data
struct OpGet {
OpTypeData& opCast(CacheBinOperation &op) {
return *reinterpret_cast<OpTypeData*>(&op.data);
}
-// ------------------------------------------------------------------------ //
+
+/* ------------------------------------------------------------------------ */
#if __TBB_MALLOC_LOCACHE_STAT
intptr_t mallocCalls, cacheHits;
#if __TBB_MALLOC_WHITEBOX_TEST
tbbmalloc_whitebox::locPutProcessed+=prep.putListNum;
#endif
- toRelease = bin->putList(prep.head, prep.tail, bitMask, idx, prep.putListNum);
+ toRelease = bin->putList(prep.head, prep.tail, bitMask, idx, prep.putListNum, extMemPool->loc.hugeSizeThreshold);
}
needCleanup = extMemPool->loc.isCleanupNeededOnRange(timeRange, startTime);
currTime = endTime - 1;
CacheBinFunctor<Props> func( this, extMemPool, bitMask, idx );
aggregator.execute( op, func, longLifeTime );
- if ( LargeMemoryBlock *toRelease = func.getToRelease() )
+ if ( LargeMemoryBlock *toRelease = func.getToRelease()) {
extMemPool->backend.returnLargeObject(toRelease);
+ }
- if ( func.isCleanupNeeded() )
+ if ( func.isCleanupNeeded() ) {
extMemPool->loc.doCleanup( func.getCurrTime(), /*doThreshDecr=*/false);
+ }
}
template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>::
}
template<typename Props> void LargeObjectCacheImpl<Props>::
- CacheBin::updateUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx) {
+ CacheBin::updateUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx)
+{
OpUpdateUsedSize data = {size};
CacheBinOperation op(data);
ExecuteOperation( &op, extMemPool, bitMask, idx );
}
-/* ----------------------------------------------------------------------------------------------------- */
+
/* ------------------------------ Unsafe methods used with the aggregator ------------------------------ */
+
template<typename Props> LargeMemoryBlock *LargeObjectCacheImpl<Props>::
- CacheBin::putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask, int idx, int num)
+ CacheBin::putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask, int idx, int num, size_t hugeSizeThreshold)
{
size_t size = head->unalignedSize;
usedSize -= num*size;
MALLOC_ASSERT( !last || (last->age != 0 && last->age != -1U), ASSERT_TEXT );
MALLOC_ASSERT( (tail==head && num==1) || (tail!=head && num>1), ASSERT_TEXT );
LargeMemoryBlock *toRelease = NULL;
- if (!lastCleanedAge) {
+ if (size < hugeSizeThreshold && !lastCleanedAge) {
// 1st object of such size was released.
        // Do not cache it; remember when this occurred
        // to take it into account on a cache miss.
return result;
}
-// forget the history for the bin if it was unused for long time
template<typename Props> void LargeObjectCacheImpl<Props>::
CacheBin::forgetOutdatedState(uintptr_t currTime)
{
bool doCleanup = false;
if (ageThreshold)
- doCleanup = sinceLastGet > Props::LongWaitFactor*ageThreshold;
+ doCleanup = sinceLastGet > Props::LongWaitFactor * ageThreshold;
else if (lastCleanedAge)
- doCleanup = sinceLastGet > Props::LongWaitFactor*(lastCleanedAge - lastGet);
+ doCleanup = sinceLastGet > Props::LongWaitFactor * (lastCleanedAge - lastGet);
if (doCleanup) {
lastCleanedAge = 0;
return toRelease;
}
+
/* ----------------------------------------------------------------------------------------------------- */
template<typename Props> size_t LargeObjectCacheImpl<Props>::
return cachedSize;
}
-// release from cache blocks that are older than ageThreshold
+// Release from the cache those blocks that are older than ageThreshold
template<typename Props>
bool LargeObjectCacheImpl<Props>::regularCleanup(ExtMemoryPool *extMemPool, uintptr_t currTime, bool doThreshDecr)
{
bool released = false;
BinsSummary binsSummary;
- for (int i = bitMask.getMaxTrue(numBins-1); i >= 0;
- i = bitMask.getMaxTrue(i-1)) {
+    // The threshold is at or below this cache's first bin: no bins are subject to regular cleanup
+ if (hugeSizeThresholdIdx == 0) return false;
+
+    // Start the search from the last bin below the huge size threshold (such bins can be cleaned up)
+ int startSearchIdx = hugeSizeThresholdIdx - 1;
+
+ for (int i = bitMask.getMaxTrue(startSearchIdx); i >= 0; i = bitMask.getMaxTrue(i-1)) {
bin[i].updateBinsSummary(&binsSummary);
- if (!doThreshDecr && tooLargeLOC>2 && binsSummary.isLOCTooLarge()) {
+ if (!doThreshDecr && tooLargeLOC > 2 && binsSummary.isLOCTooLarge()) {
// if LOC is too large for quite long time, decrease the threshold
// based on bin hit statistics.
// For this, redo cleanup from the beginning.
// Note: on this iteration total usedSz can be not too large
// in comparison to total cachedSz, as we calculated it only
// partially. We are ok with it.
- i = bitMask.getMaxTrue(numBins-1)+1;
+ i = bitMask.getMaxTrue(startSearchIdx)+1;
doThreshDecr = true;
binsSummary.reset();
continue;
}
if (doThreshDecr)
bin[i].decreaseThreshold();
- if (bin[i].cleanToThreshold(extMemPool, &bitMask, currTime, i))
+
+ if (bin[i].cleanToThreshold(extMemPool, &bitMask, currTime, i)) {
released = true;
+ }
}
-
// We want to find if LOC was too large for some time continuously,
// so OK with races between incrementing and zeroing, but incrementing
// must be atomic.
bool LargeObjectCacheImpl<Props>::cleanAll(ExtMemoryPool *extMemPool)
{
bool released = false;
- for (int i = numBins-1; i >= 0; i--)
+ for (int i = numBins-1; i >= 0; i--) {
released |= bin[i].releaseAllToBackend(extMemPool, &bitMask, i);
+ }
return released;
}
+template<typename Props>
+void LargeObjectCacheImpl<Props>::reset() {
+ tooLargeLOC = 0;
+ for (int i = numBins-1; i >= 0; i--)
+ bin[i].init();
+ bitMask.reset();
+}
+
#if __TBB_MALLOC_WHITEBOX_TEST
template<typename Props>
size_t LargeObjectCacheImpl<Props>::getLOCSize() const
return largeCache.cleanAll(extMemPool) | hugeCache.cleanAll(extMemPool);
}
+void LargeObjectCache::reset()
+{
+ largeCache.reset();
+ hugeCache.reset();
+}
+
template<typename Props>
LargeMemoryBlock *LargeObjectCacheImpl<Props>::get(ExtMemoryPool *extMemoryPool, size_t size)
{
- MALLOC_ASSERT( size%Props::CacheStep==0, ASSERT_TEXT );
- int idx = sizeToIdx(size);
+ int idx = Props::sizeToIdx(size);
LargeMemoryBlock *lmb = bin[idx].get(extMemoryPool, size, &bitMask, idx);
template<typename Props>
void LargeObjectCacheImpl<Props>::updateCacheState(ExtMemoryPool *extMemPool, DecreaseOrIncrease op, size_t size)
{
- int idx = sizeToIdx(size);
+ int idx = Props::sizeToIdx(size);
MALLOC_ASSERT(idx<numBins, ASSERT_TEXT);
bin[idx].updateUsedSize(extMemPool, op==decrease? -size : size, &bitMask, idx);
}
template<typename Props>
void LargeObjectCacheImpl<Props>::putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *toCache)
{
- int toBinIdx = sizeToIdx(toCache->unalignedSize);
+ int toBinIdx = Props::sizeToIdx(toCache->unalignedSize);
MALLOC_ITT_SYNC_RELEASING(bin+toBinIdx);
bin[toBinIdx].putList(extMemPool, toCache, &bitMask, toBinIdx);
hugeCache.updateCacheState(extMemPool, op, size);
}
+uintptr_t LargeObjectCache::getCurrTime()
+{
+ return (uintptr_t)AtomicIncrement((intptr_t&)cacheCurrTime);
+}
+
+uintptr_t LargeObjectCache::getCurrTimeRange(uintptr_t range)
+{
+ return (uintptr_t)AtomicAdd((intptr_t&)cacheCurrTime, range) + 1;
+}
+
void LargeObjectCache::registerRealloc(size_t oldSize, size_t newSize)
{
updateCacheState(decrease, oldSize);
- updateCacheState(increase, newSize);
+ updateCacheState(increase, alignToBin(newSize));
+}
+
+size_t LargeObjectCache::alignToBin(size_t size) {
+ return size < maxLargeSize ? LargeCacheType::alignToBin(size) : HugeCacheType::alignToBin(size);
}
-// return artificial bin index, it's used only during sorting and never saved
+// Used for internal purposes only
int LargeObjectCache::sizeToIdx(size_t size)
{
- MALLOC_ASSERT(size < maxHugeSize, ASSERT_TEXT);
- return size < maxLargeSize?
+ MALLOC_ASSERT(size <= maxHugeSize, ASSERT_TEXT);
+ return size < maxLargeSize ?
LargeCacheType::sizeToIdx(size) :
- LargeCacheType::getNumBins()+HugeCacheType::sizeToIdx(size);
+ LargeCacheType::numBins + HugeCacheType::sizeToIdx(size);
}
void LargeObjectCache::putList(LargeMemoryBlock *list)
for (LargeMemoryBlock *curr = list; curr; curr = toProcess) {
LargeMemoryBlock *tail = curr;
toProcess = curr->next;
- if (curr->unalignedSize >= maxHugeSize) {
+ if (!sizeInCacheRange(curr->unalignedSize)) {
extMemPool->backend.returnLargeObject(curr);
continue;
}
void LargeObjectCache::put(LargeMemoryBlock *largeBlock)
{
- if (largeBlock->unalignedSize < maxHugeSize) {
+ size_t blockSize = largeBlock->unalignedSize;
+ if (sizeInCacheRange(blockSize)) {
largeBlock->next = NULL;
- if (largeBlock->unalignedSize<maxLargeSize)
+ if (blockSize < maxLargeSize)
largeCache.putList(extMemPool, largeBlock);
else
hugeCache.putList(extMemPool, largeBlock);
- } else
+ } else {
extMemPool->backend.returnLargeObject(largeBlock);
+ }
}
LargeMemoryBlock *LargeObjectCache::get(size_t size)
{
- MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT );
- MALLOC_ASSERT( size>=minLargeSize, ASSERT_TEXT );
-
- if ( size < maxHugeSize) {
- return size < maxLargeSize?
+ MALLOC_ASSERT( size >= minLargeSize, ASSERT_TEXT );
+ if (sizeInCacheRange(size)) {
+ return size < maxLargeSize ?
largeCache.get(extMemPool, size) : hugeCache.get(extMemPool, size);
}
return NULL;
void *o = backend.remap(ptr, oldSize, newSize, alignment);
if (o) {
LargeMemoryBlock *lmb = ((LargeObjectHdr*)o - 1)->memoryBlock;
- loc.registerRealloc(lmb->unalignedSize, oldUnalignedSize);
+ loc.registerRealloc(oldUnalignedSize, lmb->unalignedSize);
}
return o;
}
--- /dev/null
+/*
+ Copyright (c) 2005-2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef __TBB_tbbmalloc_internal_H
+ #error tbbmalloc_internal.h must be included at this point
+#endif
+
+#ifndef __TBB_large_objects_H
+#define __TBB_large_objects_H
+
+//! The list of possible Cache Bin Aggregator operations.
+/* Declared here to avoid Solaris Studio* 12.2 "multiple definitions" error */
+enum CacheBinOperationType {
+ CBOP_INVALID = 0,
+ CBOP_GET,
+ CBOP_PUT_LIST,
+ CBOP_CLEAN_TO_THRESHOLD,
+ CBOP_CLEAN_ALL,
+ CBOP_UPDATE_USED_SIZE
+};
+
+// The Cache Bin Aggregator operation status list.
+// CBST_NOWAIT can be specified for non-blocking operations.
+enum CacheBinOperationStatus {
+ CBST_WAIT = 0,
+ CBST_NOWAIT,
+ CBST_DONE
+};
+
+/*
+ * Bins that grow with an arithmetic step
+ */
+template<size_t MIN_SIZE, size_t MAX_SIZE>
+struct LargeBinStructureProps {
+public:
+ static const size_t MinSize = MIN_SIZE, MaxSize = MAX_SIZE;
+ static const size_t CacheStep = 8 * 1024;
+ static const unsigned NumBins = (MaxSize - MinSize) / CacheStep;
+
+ static size_t alignToBin(size_t size) {
+ return alignUp(size, CacheStep);
+ }
+
+ static int sizeToIdx(size_t size) {
+ MALLOC_ASSERT(MinSize <= size && size < MaxSize, ASSERT_TEXT);
+ MALLOC_ASSERT(size % CacheStep == 0, ASSERT_TEXT);
+ return (size - MinSize) / CacheStep;
+ }
+};
+
+/*
+ * Bins that grow with a special geometric progression.
+ */
+template<size_t MIN_SIZE, size_t MAX_SIZE>
+struct HugeBinStructureProps {
+
+private:
+ // Sizes grow with the following formula: Size = MinSize * (2 ^ (Index / StepFactor))
+    // There are StepFactor bins (8 by default) between each power of 2 bin
+ static const int MaxSizeExp = Log2<MAX_SIZE>::value;
+ static const int MinSizeExp = Log2<MIN_SIZE>::value;
+ static const int StepFactor = 8;
+ static const int StepFactorExp = Log2<StepFactor>::value;
+
+public:
+ static const size_t MinSize = MIN_SIZE, MaxSize = MAX_SIZE;
+ static const unsigned NumBins = (MaxSizeExp - MinSizeExp) * StepFactor;
+
+ static size_t alignToBin(size_t size) {
+ size_t minorStepExp = BitScanRev(size) - StepFactorExp;
+ return alignUp(size, 1ULL << minorStepExp);
+ }
+
+    // Sizes between powers of 2 are approximated to one of the StepFactor sub-bins.
+ static int sizeToIdx(size_t size) {
+ MALLOC_ASSERT(MinSize <= size && size <= MaxSize, ASSERT_TEXT);
+ int sizeExp = (int)BitScanRev(size); // same as __TBB_Log2
+ size_t majorStepSize = 1ULL << sizeExp;
+ int minorStepExp = sizeExp - StepFactorExp;
+ int minorIdx = (size - majorStepSize) >> minorStepExp;
+ MALLOC_ASSERT(size == majorStepSize + ((size_t)minorIdx << minorStepExp),
+ "Size is not aligned on the bin");
+ return StepFactor * (sizeExp - MinSizeExp) + minorIdx;
+ }
+};
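Worked example of the two mappings, using the constants defined further down in this header (8KB arithmetic step; huge bins from 8MB with StepFactor = 8): a 24KB block lands in large bin (24KB - 8KB) / 8KB = 2, while a 12MB block has sizeExp = 23 (the 8MB major step), minor step 2^(23-3) = 1MB, and so lands in huge bin 8*(23 - 23) + (12MB - 8MB)/1MB = 4. The same arithmetic as a compile-time check (sketch, assumes C++11 static_assert is available):

    static_assert((24*1024 - 8*1024) / (8*1024) == 2, "24KB -> large bin 2");
    static_assert(8*(23 - 23) + (12*1048576 - 8*1048576) / 1048576 == 4,
                  "12MB -> huge bin 4 (1MB minor step)");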
+
+/*
+ * Cache properties accessor
+ *
+ * TooLargeFactor -- the cache size is treated as "too large" when it exceeds
+ *                   TooLargeFactor times the user data size
+ * OnMissFactor -- if a cache miss occurred and the cache was cleaned,
+ *                 set ageThreshold to OnMissFactor * the difference
+ *                 between the current time and the last time the cache was cleaned
+ * LongWaitFactor -- used to detect rarely-used bins and forget their usage history
+ */
+template<typename StructureProps, int TOO_LARGE, int ON_MISS, int LONG_WAIT>
+struct LargeObjectCacheProps : public StructureProps {
+ static const int TooLargeFactor = TOO_LARGE, OnMissFactor = ON_MISS, LongWaitFactor = LONG_WAIT;
+};
+
+template<typename Props>
+class LargeObjectCacheImpl {
+private:
+
+ // Current sizes of used and cached objects. It's calculated while we are
+ // traversing bins, and used for isLOCTooLarge() check at the same time.
+ class BinsSummary {
+ size_t usedSz;
+ size_t cachedSz;
+ public:
+ BinsSummary() : usedSz(0), cachedSz(0) {}
+ // "too large" criteria
+ bool isLOCTooLarge() const { return cachedSz > Props::TooLargeFactor * usedSz; }
+ void update(size_t usedSize, size_t cachedSize) {
+ usedSz += usedSize;
+ cachedSz += cachedSize;
+ }
+ void reset() { usedSz = cachedSz = 0; }
+ };
+
+public:
+ // The number of bins to cache large/huge objects.
+ static const uint32_t numBins = Props::NumBins;
+
+ typedef BitMaskMax<numBins> BinBitMask;
+
+    // Doubly-linked list of same-size cached blocks ordered by age (oldest on top)
+    // TODO: do we really want the list to be doubly-linked? A singly-linked list
+    // would reduce memory consumption and need fewer operations under the lock.
+ // TODO: try to switch to 32-bit logical time to save space in CacheBin
+ // and move bins to different cache lines.
+ class CacheBin {
+ private:
+ LargeMemoryBlock *first,
+ *last;
        /* age of the oldest block in the list; equal to last->age, if last is defined;
           used for quickly checking it without acquiring the lock. */
        uintptr_t oldest;
        /* currAge when something was excluded from the list because of its age,
           not because of a cache hit */
+ uintptr_t lastCleanedAge;
+ /* Current threshold value for the blocks of a particular size.
+ Set on cache miss. */
+ intptr_t ageThreshold;
+
+ /* total size of all objects corresponding to the bin and allocated by user */
+ size_t usedSize,
+ /* total size of all objects cached in the bin */
+ cachedSize;
        /* mean time a block spends in the bin before successful reuse */
+ intptr_t meanHitRange;
+ /* time of last get called for the bin */
+ uintptr_t lastGet;
+
+ typename MallocAggregator<CacheBinOperation>::type aggregator;
+
+ void ExecuteOperation(CacheBinOperation *op, ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx, bool longLifeTime = true);
+
+ /* should be placed in zero-initialized memory, ctor not needed. */
+ CacheBin();
+
+ public:
+ void init() {
+ memset(this, 0, sizeof(CacheBin));
+ }
+
+ /* ---------- Cache accessors ---------- */
+ void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *head, BinBitMask *bitMask, int idx);
+ LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx);
+
+ /* ---------- Cleanup functions -------- */
+ bool cleanToThreshold(ExtMemoryPool *extMemPool, BinBitMask *bitMask, uintptr_t currTime, int idx);
+ bool releaseAllToBackend(ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx);
+ /* ------------------------------------- */
+
+ void updateUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx);
+ void decreaseThreshold() {
+ if (ageThreshold)
+ ageThreshold = (ageThreshold + meanHitRange) / 2;
+ }
+ void updateBinsSummary(BinsSummary *binsSummary) const {
+ binsSummary->update(usedSize, cachedSize);
+ }
+ size_t getSize() const { return cachedSize; }
+ size_t getUsedSize() const { return usedSize; }
+ size_t reportStat(int num, FILE *f);
+
+ /* --------- Unsafe methods used with the aggregator ------- */
+ void forgetOutdatedState(uintptr_t currTime);
+ LargeMemoryBlock *putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask,
+ int idx, int num, size_t hugeObjectThreshold);
+ LargeMemoryBlock *get();
+ LargeMemoryBlock *cleanToThreshold(uintptr_t currTime, BinBitMask *bitMask, int idx);
+ LargeMemoryBlock *cleanAll(BinBitMask *bitMask, int idx);
+ void updateUsedSize(size_t size, BinBitMask *bitMask, int idx) {
+ if (!usedSize) bitMask->set(idx, true);
+ usedSize += size;
+ if (!usedSize && !first) bitMask->set(idx, false);
+ }
+ void updateMeanHitRange( intptr_t hitRange ) {
+ hitRange = hitRange >= 0 ? hitRange : 0;
+ meanHitRange = meanHitRange ? (meanHitRange + hitRange) / 2 : hitRange;
+ }
+ void updateAgeThreshold( uintptr_t currTime ) {
+ if (lastCleanedAge)
+ ageThreshold = Props::OnMissFactor*(currTime - lastCleanedAge);
+ }
+ void updateCachedSize(size_t size) {
+ cachedSize += size;
+ }
+ void setLastGet( uintptr_t newLastGet ) {
+ lastGet = newLastGet;
+ }
+ /* -------------------------------------------------------- */
+ };
+
+    // Bin index for the huge size threshold; used to speed up the regular
+    // cleanup search when the "huge size threshold" setting is defined
+ intptr_t hugeSizeThresholdIdx;
+
+private:
+ // How many times LOC was "too large"
+ intptr_t tooLargeLOC;
+ // for fast finding of used bins and bins with non-zero usedSize;
+ // indexed from the end, as we need largest 1st
+ BinBitMask bitMask;
+ // bins with lists of recently freed large blocks cached for re-use
+ CacheBin bin[numBins];
+
+public:
+ /* ------------ CacheBin structure dependent stuff ------------ */
+ static size_t alignToBin(size_t size) {
+ return Props::alignToBin(size);
+ }
+ static int sizeToIdx(size_t size) {
+ return Props::sizeToIdx(size);
+ }
+
+ /* --------- Main cache functions (put, get object) ------------ */
+ void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *largeBlock);
+ LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size);
+
+ /* ------------------------ Cleanup ---------------------------- */
+ bool regularCleanup(ExtMemoryPool *extMemPool, uintptr_t currAge, bool doThreshDecr);
+ bool cleanAll(ExtMemoryPool *extMemPool);
+
+ /* -------------------------- Other ---------------------------- */
+ void updateCacheState(ExtMemoryPool *extMemPool, DecreaseOrIncrease op, size_t size);
+
+ void reset();
+ void reportStat(FILE *f);
+#if __TBB_MALLOC_WHITEBOX_TEST
+ size_t getLOCSize() const;
+ size_t getUsedSize() const;
+#endif
+};
+
+class LargeObjectCache {
+private:
+ // Large bins [minLargeSize, maxLargeSize)
+ // Huge bins [maxLargeSize, maxHugeSize)
+ static const size_t minLargeSize = 8 * 1024,
+ maxLargeSize = 8 * 1024 * 1024,
+        // Cache memory up to 1TB (or 2GB on 32-bit architectures), but sieve objects above the special threshold
+ maxHugeSize = tbb::internal::select_size_t_constant<2147483648U, 1099511627776ULL>::value;
+
+public:
+    // Upper bound threshold for the caching size: above it, all objects sieve through the cache
+    // By default 64MB; the previous value was 129MB (needed by some Intel(R) Math Kernel Library (Intel(R) MKL) benchmarks)
+ static const size_t defaultMaxHugeSize = 64UL * 1024UL * 1024UL;
+    // Above this size a large object is treated as huge and does not participate in regular cleanup.
+    // Can be changed during program execution.
+ size_t hugeSizeThreshold;
+
+private:
+ // Large objects cache properties
+ typedef LargeBinStructureProps<minLargeSize, maxLargeSize> LargeBSProps;
+ typedef LargeObjectCacheProps<LargeBSProps, 2, 2, 16> LargeCacheTypeProps;
+
+ // Huge objects cache properties
+ typedef HugeBinStructureProps<maxLargeSize, maxHugeSize> HugeBSProps;
+ typedef LargeObjectCacheProps<HugeBSProps, 1, 1, 4> HugeCacheTypeProps;
+
+ // Cache implementation type with properties
+ typedef LargeObjectCacheImpl< LargeCacheTypeProps > LargeCacheType;
+ typedef LargeObjectCacheImpl< HugeCacheTypeProps > HugeCacheType;
+
+    // The beginning of largeCache is more actively used and smaller than hugeCache,
+    // so hugeCache is placed first to prevent false sharing
+    // with LargeObjectCache's predecessor
+ HugeCacheType hugeCache;
+ LargeCacheType largeCache;
+
+ /* logical time, incremented on each put/get operation
       To prevent starvation between pools, it is kept separately for each pool.
       Overflow is OK, as we only want the difference between
       its current value and some recent one.

       Both malloc and free should increment logical time; otherwise
       multiple cached blocks would have the same age,
       and the accuracy of the predictors would suffer.
+ */
+ uintptr_t cacheCurrTime;
+
+ // Memory pool that owns this LargeObjectCache.
+ // strict 1:1 relation, never changed
+ ExtMemoryPool *extMemPool;
+
+    // Returns an artificial bin index;
+    // it is used only during sorting and never saved
+ static int sizeToIdx(size_t size);
+
+ // Our friends
+ friend class Backend;
+
+public:
+ void init(ExtMemoryPool *memPool);
+
+ // Item accessors
+ void put(LargeMemoryBlock *largeBlock);
+ void putList(LargeMemoryBlock *head);
+ LargeMemoryBlock *get(size_t size);
+
+ void updateCacheState(DecreaseOrIncrease op, size_t size);
+ bool isCleanupNeededOnRange(uintptr_t range, uintptr_t currTime);
+
+ // Cleanup operations
+ bool doCleanup(uintptr_t currTime, bool doThreshDecr);
+ bool decreasingCleanup();
+ bool regularCleanup();
+ bool cleanAll();
+ void reset();
+
+ void reportStat(FILE *f);
+#if __TBB_MALLOC_WHITEBOX_TEST
+ size_t getLOCSize() const;
+ size_t getUsedSize() const;
+#endif
+
+    // The cache deals with exact-fit sizes, so each size must be aligned
+    // to its bin before an object is put into the cache
+ static size_t alignToBin(size_t size);
+
+ void setHugeSizeThreshold(size_t value);
+
+ // Check if we should cache or sieve this size
+ bool sizeInCacheRange(size_t size);
+
+ uintptr_t getCurrTime();
+ uintptr_t getCurrTimeRange(uintptr_t range);
+ void registerRealloc(size_t oldSize, size_t newSize);
+};
+
+#endif // __TBB_large_objects_H
+
return N;
}
+/*
+ * Compile-time Log2 calculation
+ */
+template <size_t NUM>
+struct Log2 { static const int value = 1 + Log2<(NUM >> 1)>::value; };
+template <>
+struct Log2<1> { static const int value = 0; };
+
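A quick sanity check of the recursion (sketch, assumes C++11 static_assert is available): the template floors the logarithm, matching the run-time BitScanRev it complements.

    static_assert(Log2<1>::value == 0, "");
    static_assert(Log2<8>::value == 3, "");
    static_assert(Log2<12>::value == 3, "floor(log2(12)) == 3");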
#if defined(min)
#undef min
#endif
#pragma warning(disable:4510 4512 4610)
#endif
+#if __SUNPRO_CC
+ // Suppress overzealous compiler warnings that a class with a reference member
+ // lacks a user-defined constructor, which can lead to errors
+ #pragma error_messages (off, refmemnoconstr)
+#endif
+
+// TODO: add a constructor to remove the warning suppression
struct parseFileItem {
const char* format;
unsigned long long& value;
#pragma warning(pop)
#endif
+#if __SUNPRO_CC
+ #pragma error_messages (on, refmemnoconstr)
+#endif
+
template <int BUF_LINE_SIZE, int N>
void parseFile(const char* file, const parseFileItem (&items)[N]) {
// Tries to find all items in each line
*/
#ifndef __TBB_tbbmalloc_internal_H
-#define __TBB_tbbmalloc_internal_H 1
+#define __TBB_tbbmalloc_internal_H
#include "TypeDefinitions.h" /* Also includes customization layer Customize.h */
bool cleanup(Backend* backend);
};
-/* cache blocks in range [MinSize; MaxSize) in bins with CacheStep
- TooLargeFactor -- when cache size treated "too large" in comparison to user data size
- OnMissFactor -- If cache miss occurred and cache was cleaned,
- set ageThreshold to OnMissFactor * the difference
- between current time and last time cache was cleaned.
- LongWaitFactor -- to detect rarely-used bins and forget about their usage history
-*/
-template<size_t MIN_SIZE, size_t MAX_SIZE, uint32_t CACHE_STEP, int TOO_LARGE,
- int ON_MISS, int LONG_WAIT>
-struct LargeObjectCacheProps {
- static const size_t MinSize = MIN_SIZE, MaxSize = MAX_SIZE;
- static const uint32_t CacheStep = CACHE_STEP;
- static const int TooLargeFactor = TOO_LARGE, OnMissFactor = ON_MISS,
- LongWaitFactor = LONG_WAIT;
-};
-
-template<typename Props>
-class LargeObjectCacheImpl {
-private:
- // The number of bins to cache large objects.
- static const uint32_t numBins = (Props::MaxSize-Props::MinSize)/Props::CacheStep;
- // Current sizes of used and cached objects. It's calculated while we are
- // traversing bins, and used for isLOCTooLarge() check at the same time.
- class BinsSummary {
- size_t usedSz;
- size_t cachedSz;
- public:
- BinsSummary() : usedSz(0), cachedSz(0) {}
- // "too large" criteria
- bool isLOCTooLarge() const { return cachedSz > Props::TooLargeFactor*usedSz; }
- void update(size_t usedSize, size_t cachedSize) {
- usedSz += usedSize;
- cachedSz += cachedSize;
- }
- void reset() { usedSz = cachedSz = 0; }
- };
-public:
- typedef BitMaskMax<numBins> BinBitMask;
-
- // 2-linked list of same-size cached blocks ordered by age (oldest on top)
- // TODO: are we really want the list to be 2-linked? This allows us
- // reduce memory consumption and do less operations under lock.
- // TODO: try to switch to 32-bit logical time to save space in CacheBin
- // and move bins to different cache lines.
- class CacheBin {
- private:
- LargeMemoryBlock *first,
- *last;
- /* age of an oldest block in the list; equal to last->age, if last defined,
- used for quick cheching it without acquiring the lock. */
- uintptr_t oldest;
- /* currAge when something was excluded out of list because of the age,
- not because of cache hit */
- uintptr_t lastCleanedAge;
- /* Current threshold value for the blocks of a particular size.
- Set on cache miss. */
- intptr_t ageThreshold;
-
- /* total size of all objects corresponding to the bin and allocated by user */
- size_t usedSize,
- /* total size of all objects cached in the bin */
- cachedSize;
- /* mean time of presence of block in the bin before successful reuse */
- intptr_t meanHitRange;
- /* time of last get called for the bin */
- uintptr_t lastGet;
-
- typename MallocAggregator<CacheBinOperation>::type aggregator;
-
- void ExecuteOperation(CacheBinOperation *op, ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx, bool longLifeTime = true);
- /* should be placed in zero-initialized memory, ctor not needed. */
- CacheBin();
- public:
- void init() { memset(this, 0, sizeof(CacheBin)); }
- void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *head, BinBitMask *bitMask, int idx);
- LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx);
- bool cleanToThreshold(ExtMemoryPool *extMemPool, BinBitMask *bitMask, uintptr_t currTime, int idx);
- bool releaseAllToBackend(ExtMemoryPool *extMemPool, BinBitMask *bitMask, int idx);
- void updateUsedSize(ExtMemoryPool *extMemPool, size_t size, BinBitMask *bitMask, int idx);
-
- void decreaseThreshold() {
- if (ageThreshold)
- ageThreshold = (ageThreshold + meanHitRange)/2;
- }
- void updateBinsSummary(BinsSummary *binsSummary) const {
- binsSummary->update(usedSize, cachedSize);
- }
- size_t getSize() const { return cachedSize; }
- size_t getUsedSize() const { return usedSize; }
- size_t reportStat(int num, FILE *f);
- /* ---------- unsafe methods used with the aggregator ---------- */
- void forgetOutdatedState(uintptr_t currTime);
- LargeMemoryBlock *putList(LargeMemoryBlock *head, LargeMemoryBlock *tail, BinBitMask *bitMask, int idx, int num);
- LargeMemoryBlock *get();
- LargeMemoryBlock *cleanToThreshold(uintptr_t currTime, BinBitMask *bitMask, int idx);
- LargeMemoryBlock *cleanAll(BinBitMask *bitMask, int idx);
- void updateUsedSize(size_t size, BinBitMask *bitMask, int idx) {
- if (!usedSize) bitMask->set(idx, true);
- usedSize += size;
- if (!usedSize && !first) bitMask->set(idx, false);
- }
- void updateMeanHitRange( intptr_t hitRange ) {
- hitRange = hitRange >= 0 ? hitRange : 0;
- meanHitRange = meanHitRange ? (meanHitRange + hitRange)/2 : hitRange;
- }
- void updateAgeThreshold( uintptr_t currTime ) {
- if (lastCleanedAge)
- ageThreshold = Props::OnMissFactor*(currTime - lastCleanedAge);
- }
- void updateCachedSize(size_t size) { cachedSize += size; }
- void setLastGet( uintptr_t newLastGet ) { lastGet = newLastGet; }
- /* -------------------------------------------------------- */
- };
-private:
- intptr_t tooLargeLOC; // how many times LOC was "too large"
- // for fast finding of used bins and bins with non-zero usedSize;
- // indexed from the end, as we need largest 1st
- BinBitMask bitMask;
- // bins with lists of recently freed large blocks cached for re-use
- CacheBin bin[numBins];
-
-public:
- static int sizeToIdx(size_t size) {
- MALLOC_ASSERT(Props::MinSize <= size && size < Props::MaxSize, ASSERT_TEXT);
- return (size-Props::MinSize)/Props::CacheStep;
- }
- static int getNumBins() { return numBins; }
-
- void putList(ExtMemoryPool *extMemPool, LargeMemoryBlock *largeBlock);
- LargeMemoryBlock *get(ExtMemoryPool *extMemPool, size_t size);
-
- void updateCacheState(ExtMemoryPool *extMemPool, DecreaseOrIncrease op, size_t size);
- bool regularCleanup(ExtMemoryPool *extMemPool, uintptr_t currAge, bool doThreshDecr);
- bool cleanAll(ExtMemoryPool *extMemPool);
- void reset() {
- tooLargeLOC = 0;
- for (int i = numBins-1; i >= 0; i--)
- bin[i].init();
- bitMask.reset();
- }
- void reportStat(FILE *f);
-#if __TBB_MALLOC_WHITEBOX_TEST
- size_t getLOCSize() const;
- size_t getUsedSize() const;
-#endif
-};
-
-class LargeObjectCache {
- static const size_t minLargeSize = 8*1024,
- maxLargeSize = 8*1024*1024,
- // There are benchmarks of interest that should work well with objects of this size
- maxHugeSize = 129*1024*1024;
-public:
- // Difference between object sizes in large block bins
- static const uint32_t largeBlockCacheStep = 8*1024,
- hugeBlockCacheStep = 512*1024;
-private:
- typedef LargeObjectCacheProps<minLargeSize, maxLargeSize, largeBlockCacheStep, 2, 2, 16> LargeCacheTypeProps;
- typedef LargeObjectCacheProps<maxLargeSize, maxHugeSize, hugeBlockCacheStep, 1, 1, 4> HugeCacheTypeProps;
- typedef LargeObjectCacheImpl< LargeCacheTypeProps > LargeCacheType;
- typedef LargeObjectCacheImpl< HugeCacheTypeProps > HugeCacheType;
-
- // beginning of largeCache is more actively used and smaller than hugeCache,
- // so put hugeCache first to prevent false sharing
- // with LargeObjectCache's predecessor
- HugeCacheType hugeCache;
- LargeCacheType largeCache;
-
- /* logical time, incremented on each put/get operation
- To prevent starvation between pools, keep separately for each pool.
- Overflow is OK, as we only want difference between
- its current value and some recent.
-
- Both malloc and free should increment logical time, as in
- a different case multiple cached blocks would have same age,
- and accuracy of predictors suffers.
- */
- uintptr_t cacheCurrTime;
-
- // memory pool that owns this LargeObjectCache,
- ExtMemoryPool *extMemPool; // strict 1:1 relation, never changed
-
- static int sizeToIdx(size_t size);
-public:
- void init(ExtMemoryPool *memPool) { extMemPool = memPool; }
- void put(LargeMemoryBlock *largeBlock);
- void putList(LargeMemoryBlock *head);
- LargeMemoryBlock *get(size_t size);
-
- void updateCacheState(DecreaseOrIncrease op, size_t size);
- bool isCleanupNeededOnRange(uintptr_t range, uintptr_t currTime);
- bool doCleanup(uintptr_t currTime, bool doThreshDecr);
-
- bool decreasingCleanup();
- bool regularCleanup();
- bool cleanAll();
- void reset() {
- largeCache.reset();
- hugeCache.reset();
- }
- void reportStat(FILE *f);
-#if __TBB_MALLOC_WHITEBOX_TEST
- size_t getLOCSize() const;
- size_t getUsedSize() const;
-#endif
- static size_t alignToBin(size_t size) {
- return size<maxLargeSize? alignUp(size, largeBlockCacheStep)
- : alignUp(size, hugeBlockCacheStep);
- }
-
- uintptr_t getCurrTime() { return (uintptr_t)AtomicIncrement((intptr_t&)cacheCurrTime); }
- uintptr_t getCurrTimeRange(uintptr_t range) { return (uintptr_t)AtomicAdd((intptr_t&)cacheCurrTime, range)+1; }
- void registerRealloc(size_t oldSize, size_t newSize);
-};
+/* Large objects entities */
+#include "large_objects.h"
// select index size for BackRefMaster based on word size: default is uint32_t,
// uint16_t for 32-bit platforms
class AllocControlledMode {
intptr_t val;
bool setDone;
+
public:
- bool ready() const { return setDone; }
intptr_t get() const {
MALLOC_ASSERT(setDone, ASSERT_TEXT);
return val;
}
- void set(intptr_t newVal) { // note set() can be called before init()
+
+ // Note: set() can be called before init()
+ void set(intptr_t newVal) {
val = newVal;
setDone = true;
}
+
+ bool ready() const {
+ return setDone;
+ }
+
// envName - environment variable to get controlled mode
- void initReadEnv(const char *envName, intptr_t defaultVal);
+ void initReadEnv(const char *envName, intptr_t defaultVal) {
+ if (!setDone) {
+#if !__TBB_WIN8UI_SUPPORT
+            // TODO: use strtol to get the actual value of the environment variable
+ const char *envVal = getenv(envName);
+ if (envVal && !strcmp(envVal, "1"))
+ val = 1;
+ else
+#endif
+ val = defaultVal;
+ setDone = true;
+ }
+ }
};
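A usage sketch for the pattern above (the instance and environment-variable names are purely illustrative): set() beats a later initReadEnv(), and get() asserts that one of the two has happened.

    AllocControlledMode someMode;                    // hypothetical instance
    someMode.initReadEnv("SOME_TBB_MALLOC_VAR", 0);  // env value "1" turns it on
    if (someMode.ready() && someMode.get()) {
        // ... feature enabled ...
    }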
// Page type to be used inside MapMemory.
#include __TBB_STD_SWAP_HEADER
#include "tbb/atomic.h"
+#include "tbb/tbb_allocator.h"
#if __SUNPRO_CC
using std::printf;
void* unique_pointer;
};
+// C++03 allocator doesn't have to be assignable or swappable, so
+// tbb::internal::allocator_traits defines POCCA and POCS as false_type
+#if __TBB_ALLOCATOR_TRAITS_PRESENT
#include "tbb/internal/_allocator_traits.h" // Need traits_true/false_type
template <typename Allocator, typename POCMA = tbb::internal::traits_false_type,
}
};
+namespace propagating_allocators {
+typedef tbb::tbb_allocator<int> base_allocator;
+typedef tbb::internal::traits_true_type true_type;
+typedef tbb::internal::traits_false_type false_type;
+
+typedef propagating_allocator<base_allocator, /*POCMA=*/true_type, /*POCCA=*/true_type,
+ /*POCS=*/true_type> always_propagating_allocator;
+typedef propagating_allocator<base_allocator, false_type, false_type, false_type> never_propagating_allocator;
+typedef propagating_allocator<base_allocator, true_type, false_type, false_type> pocma_allocator;
+typedef propagating_allocator<base_allocator, false_type, true_type, false_type> pocca_allocator;
+typedef propagating_allocator<base_allocator, false_type, false_type, true_type> pocs_allocator;
+}
+
template <typename Allocator, typename POCMA, typename POCCA, typename POCS>
void swap(propagating_allocator<Allocator, POCMA, POCCA, POCS>& lhs,
propagating_allocator<Allocator, POCMA, POCCA, POCS>&) {
*lhs.propagated_on_swap = true;
}
+template <typename ContainerType>
+void test_allocator_traits_support() {
+ typedef typename ContainerType::allocator_type allocator_type;
+ typedef std::allocator_traits<allocator_type> allocator_traits;
+ typedef typename allocator_traits::propagate_on_container_copy_assignment pocca_type;
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ typedef typename allocator_traits::propagate_on_container_move_assignment pocma_type;
+#endif
+ typedef typename allocator_traits::propagate_on_container_swap pocs_type;
+
+ bool propagated_on_copy = false;
+ bool propagated_on_move = false;
+ bool propagated_on_swap = false;
+ bool selected_on_copy = false;
+
+ allocator_type alloc(propagated_on_copy, propagated_on_move, propagated_on_swap, selected_on_copy);
+
+ ContainerType c1(alloc), c2(c1);
+ ASSERT(selected_on_copy, "select_on_container_copy_construction function was not called");
+
+ c1 = c2;
+ ASSERT(propagated_on_copy == pocca_type::value, "Unexpected allocator propagation on copy assignment");
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ c2 = std::move(c1);
+ ASSERT(propagated_on_move == pocma_type::value, "Unexpected allocator propagation on move assignment");
+#endif
+
+ c1.swap(c2);
+ ASSERT(propagated_on_swap == pocs_type::value, "Unexpected allocator propagation on swap");
+}
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+class non_movable_object {
+ non_movable_object() {}
+private:
+ non_movable_object(non_movable_object&&);
+ non_movable_object& operator=(non_movable_object&&);
+};
+
+template <typename ContainerType>
+void test_allocator_traits_with_non_movable_value_type() {
+    // Check that, if POCMA is true, the container allows move assignment without per-element move
+ typedef typename ContainerType::allocator_type allocator_type;
+ typedef std::allocator_traits<allocator_type> allocator_traits;
+ typedef typename allocator_traits::propagate_on_container_move_assignment pocma_type;
+ ASSERT(pocma_type::value, "Allocator POCMA must be true for this test");
+ allocator_type alloc;
+ ContainerType container1(alloc), container2(alloc);
+ container1 = std::move(container2);
+}
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
+
+#endif // __TBB_ALLOCATOR_TRAITS_PRESENT
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+
+template<typename Allocator>
+class allocator_aware_data {
+public:
+ static bool assert_on_constructions;
+ typedef Allocator allocator_type;
+
+ allocator_aware_data(const allocator_type& allocator = allocator_type())
+ : my_allocator(allocator), my_value(0) {}
+ allocator_aware_data(int v, const allocator_type& allocator = allocator_type())
+ : my_allocator(allocator), my_value(v) {}
+ allocator_aware_data(const allocator_aware_data&) {
+ ASSERT(!assert_on_constructions, "Allocator should propogate to the data during copy construction");
+ }
+ allocator_aware_data(allocator_aware_data&&) {
+ ASSERT(!assert_on_constructions, "Allocator should propogate to the data during move construction");
+ }
+ allocator_aware_data(const allocator_aware_data& rhs, const allocator_type& allocator)
+ : my_allocator(allocator), my_value(rhs.my_value) {}
+ allocator_aware_data(allocator_aware_data&& rhs, const allocator_type& allocator)
+ : my_allocator(allocator), my_value(rhs.my_value) {}
+
+ int value() const { return my_value; }
+private:
+ allocator_type my_allocator;
+ int my_value;
+};
+
+template<typename Allocator>
+bool allocator_aware_data<Allocator>::assert_on_constructions = false;
+
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
+
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warnings
#pragma warning (pop)
#ifndef TBB_PREVIEW_ALGORITHM_TRACE
#define TBB_PREVIEW_ALGORITHM_TRACE 1
#endif
+ #ifndef TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
+ #define TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR 1
+ #endif
#endif
namespace Harness {
async_activity* my_activity;
};
- async_activity(int expected_items, int sleep_time = 50) : my_expected_items(expected_items), my_sleep_time(sleep_time) {
+ async_activity(int expected_items, bool deferred = false, int sleep_time = 50)
+ : my_expected_items(expected_items), my_sleep_time(sleep_time) {
+ is_active = !deferred;
my_quit = false;
tbb::tbb_thread( ServiceThreadBody( this ) ).swap( my_service_thread );
}
private:
- async_activity( const async_activity& ) : my_expected_items(UNKNOWN_NUMBER_OF_ITEMS), my_sleep_time(0) { }
+ async_activity( const async_activity& )
+ : my_expected_items(UNKNOWN_NUMBER_OF_ITEMS), my_sleep_time(0) {
+ is_active = true;
+ }
public:
~async_activity() {
void process() {
do {
work_type work;
- if( my_work_queue.try_pop( work ) ) {
+ if( is_active && my_work_queue.try_pop( work ) ) {
Harness::Sleep(my_sleep_time);
++async_activity_processed_msg_count;
output_type output;
wrapper_helper<output_type, output_type>::copy_value(work.input, output);
wrapper_helper<output_type, output_type>::check(work.input, output);
work.gateway->try_put(output);
- if ( my_expected_items == UNKNOWN_NUMBER_OF_ITEMS || int(async_activity_processed_msg_count) == my_expected_items ) {
+ if ( my_expected_items == UNKNOWN_NUMBER_OF_ITEMS ||
+ int(async_activity_processed_msg_count) == my_expected_items ) {
work.gateway->release_wait();
}
}
my_quit = true;
}
+ void activate() {
+ is_active = true;
+ }
+
bool should_reserve_each_time() {
if ( my_expected_items == UNKNOWN_NUMBER_OF_ITEMS )
return true;
const int my_expected_items;
const int my_sleep_time;
+ tbb::atomic< bool > is_active;
tbb::concurrent_queue< work_type > my_work_queue;
spin_test() {}
static int run(int nthreads, int async_expected_items = UNKNOWN_NUMBER_OF_ITEMS) {
- async_activity<input_type, output_type> my_async_activity(async_expected_items, 0);
+ async_activity<input_type, output_type> my_async_activity(async_expected_items, false, 0);
Harness::SpinBarrier spin_barrier(nthreads);
tbb::flow::graph g;
tbb::flow::function_node< int, input_type > start_node( g, tbb::flow::unlimited, start_body_type() );
return Harness::Done;
}
+#include "tbb/parallel_for.h"
+template<typename Input, typename Output>
+class enqueueing_on_inner_level {
+ typedef Input input_type;
+ typedef Output output_type;
+ typedef async_activity<input_type, output_type> async_activity_type;
+ typedef tbb::flow::async_node<Input, Output> async_node_type;
+ typedef typename async_node_type::gateway_type gateway_type;
+
+ class start_body_type {
+ public:
+ input_type operator() ( int input ) {
+ return input_type( input);
+ }
+ };
+
+ class async_body_type {
+ public:
+ async_body_type( async_activity_type& activity ) : my_async_activity(&activity) {}
+
+ void operator() ( const input_type &input, gateway_type& gateway ) {
+ gateway.reserve_wait();
+ my_async_activity->submit( input, gateway );
+ }
+ private:
+ async_activity_type* my_async_activity;
+ };
+
+ class end_body_type {
+ public:
+ void operator()( output_type ) {}
+ };
+
+ class body_graph_with_async {
+ public:
+ body_graph_with_async( Harness::SpinBarrier& barrier, async_activity_type& activity )
+ : spin_barrier(&barrier), my_async_activity(&activity) {}
+
+ void operator()(int) const {
+ tbb::flow::graph g;
+ tbb::flow::function_node< int, input_type > start_node( g, tbb::flow::unlimited, start_body_type() );
+
+ async_node_type offload_node( g, tbb::flow::unlimited, async_body_type( *my_async_activity ) );
+
+ tbb::flow::function_node< output_type > end_node( g, tbb::flow::unlimited, end_body_type() );
+
+ tbb::flow::make_edge( start_node, offload_node );
+ tbb::flow::make_edge( offload_node, end_node );
+
+ start_node.try_put(1);
+
+ spin_barrier->wait();
+
+ my_async_activity->activate();
+
+ g.wait_for_all();
+ }
+
+ private:
+ Harness::SpinBarrier* spin_barrier;
+ async_activity_type* my_async_activity;
+ };
+
+
+public:
+ static int run ()
+ {
+ const int nthreads = tbb::this_task_arena::max_concurrency();
+ Harness::SpinBarrier spin_barrier( nthreads );
+
+ async_activity_type my_async_activity( UNKNOWN_NUMBER_OF_ITEMS, true );
+
+ tbb::parallel_for( 0, nthreads, body_graph_with_async( spin_barrier, my_async_activity ) );
+ return Harness::Done;
+ }
+};
+
+int run_test_enqueueing_on_inner_level() {
+ equeueing_on_inner_level<int, int>::run();
+ return Harness::Done;
+}
+
int TestMain() {
tbb::task_scheduler_init init(4);
run_tests<int, int>();
test_reset();
test_copy_ctor();
test_for_spin_avoidance();
+    run_test_enqueueing_on_inner_level();
return Harness::Done;
}
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/* Some tests in this source file are based on PPL tests provided by Microsoft. */
+#include "tbb/parallel_for.h"
+#include "tbb/tick_count.h"
+#include "harness.h"
+#include "test_container_move_support.h"
+// Test that unordered containers do not require keys to have default constructors.
+#define __HARNESS_CHECKTYPE_DEFAULT_CTOR 0
+#include "harness_checktype.h"
+#undef __HARNESS_CHECKTYPE_DEFAULT_CTOR
+#include "harness_allocator.h"
+
+#if _MSC_VER
+#pragma warning(disable: 4189) // warning 4189 -- local variable is initialized but not referenced
+#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it
+#endif
+
+// TestInitListSupportWithoutAssign with an empty initializer list causes internal error in Intel Compiler.
+#define __TBB_ICC_EMPTY_INIT_LIST_TESTS_BROKEN (__INTEL_COMPILER && __INTEL_COMPILER <= 1500)
+
+typedef local_counting_allocator<debug_allocator<std::pair<const int,int>,std::allocator> > MyAllocator;
+
+template<typename Table>
+inline void CheckAllocator(typename Table::allocator_type& a, size_t expected_allocs, size_t expected_frees,
+ bool exact = true) {
+ if(exact) {
+ ASSERT( a.allocations == expected_allocs, NULL); ASSERT( a.frees == expected_frees, NULL);
+ } else {
+ ASSERT( a.allocations >= expected_allocs, NULL); ASSERT( a.frees >= expected_frees, NULL);
+ ASSERT( a.allocations - a.frees == expected_allocs - expected_frees, NULL );
+ }
+}
+
+// Check that only a dummy node is allocated if the table is empty.
+// Specialize this function for a custom container if its node allocation size is > 1
+#define CheckEmptyContainerAllocatorE(t,a,f) CheckEmptyContainerAllocator(t,a,f,true,__LINE__)
+#define CheckEmptyContainerAllocatorA(t,a,f) CheckEmptyContainerAllocator(t,a,f,false,__LINE__)
+template<typename MyTable>
+inline void CheckEmptyContainerAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact = true, int line = 0);
+
+template<typename T>
+struct strip_const { typedef T type; };
+
+template<typename T>
+struct strip_const<const T> { typedef T type; };
+
+// value generator for map
+template <typename K, typename V = std::pair<const K, K> >
+struct ValueFactory {
+ typedef typename strip_const<K>::type Kstrip;
+ static V make(const K &value) { return V(value, value); }
+ static Kstrip key(const V &value) { return value.first; }
+ static Kstrip get(const V &value) { return (Kstrip)value.second; }
+ template< typename U >
+ static U convert(const V &value) { return U(value.second); }
+};
+
+// generator for set
+template <typename T>
+struct ValueFactory<T, T> {
+ static T make(const T &value) { return value; }
+ static T key(const T &value) { return value; }
+ static T get(const T &value) { return value; }
+ template< typename U >
+ static U convert(const T &value) { return U(value); }
+};
+
+template <typename T>
+struct Value : ValueFactory<typename T::key_type, typename T::value_type> {
+ template<typename U>
+ static bool compare( const typename T::iterator& it, U val ) {
+ return (Value::template convert<U>(*it) == val);
+ }
+};
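+
+// A minimal usage sketch (illustrative only, not part of the harness): for a
+// map-like container M whose value_type is std::pair<const int, int>,
+// Value<M>::make(5) yields the pair {5, 5}, Value<M>::key() returns .first,
+// and Value<M>::get() returns .second; for a set-like container S over int,
+// all three simply pass the key through:
+//     std::pair<const int, int> p = Value<M>::make(5); // p == {5, 5}
+//     int k = Value<S>::make(5);                       // k == 5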
+
+template<Harness::StateTrackableBase::StateValue desired_state, typename T>
+void check_value_state(/* typename do_check_element_state =*/ tbb::internal::true_type, T const& t, const char* filename, int line )
+{
+ ASSERT_CUSTOM(is_state_f<desired_state>()(t), "", filename, line);
+}
+
+template<Harness::StateTrackableBase::StateValue desired_state, typename T>
+void check_value_state(/* typename do_check_element_state =*/ tbb::internal::false_type, T const&, const char* , int ) {/*do nothing*/}
+
+#define ASSERT_VALUE_STATE(do_check_element_state,state,value) check_value_state<state>(do_check_element_state,value,__FILE__,__LINE__)
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+template<typename T, typename do_check_element_state, typename V>
+void test_rvalue_insert(V v1, V v2)
+{
+ typedef T container_t;
+
+ container_t cont;
+
+ std::pair<typename container_t::iterator, bool> ins = cont.insert(Value<container_t>::make(v1));
+ ASSERT(ins.second == true && Value<container_t>::get(*(ins.first)) == v1, "Element 1 has not been inserted properly");
+ ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::MoveInitialized,*ins.first);
+
+ typename container_t::iterator it2 = cont.insert(ins.first, Value<container_t>::make(v2));
+ ASSERT(Value<container_t>::get(*(it2)) == v2, "Element 2 has not been inserted properly");
+ ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::MoveInitialized,*it2);
+
+}
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+// The test does not use variadic templates, but emplace() does.
+
+namespace emplace_helpers {
+template<typename container_t, typename arg_t, typename value_t>
+std::pair<typename container_t::iterator, bool> call_emplace_impl(container_t& c, arg_t&& k, value_t *){
+ // this is a set
+ return c.emplace(std::forward<arg_t>(k));
+}
+
+template<typename container_t, typename arg_t, typename first_t, typename second_t>
+std::pair<typename container_t::iterator, bool> call_emplace_impl(container_t& c, arg_t&& k, std::pair<first_t, second_t> *){
+ // this is a map
+ return c.emplace(k, std::forward<arg_t>(k));
+}
+
+template<typename container_t, typename arg_t>
+std::pair<typename container_t::iterator, bool> call_emplace(container_t& c, arg_t&& k){
+ typename container_t::value_type * selector = NULL;
+ return call_emplace_impl(c, std::forward<arg_t>(k), selector);
+}
+
+template<typename container_t, typename arg_t, typename value_t>
+typename container_t::iterator call_emplace_hint_impl(container_t& c, typename container_t::const_iterator hint, arg_t&& k, value_t *){
+ // this is a set
+ return c.emplace_hint(hint, std::forward<arg_t>(k));
+}
+
+template<typename container_t, typename arg_t, typename first_t, typename second_t>
+typename container_t::iterator call_emplace_hint_impl(container_t& c, typename container_t::const_iterator hint, arg_t&& k, std::pair<first_t, second_t> *){
+ // this is a map
+ return c.emplace_hint(hint, k, std::forward<arg_t>(k));
+}
+
+template<typename container_t, typename arg_t>
+typename container_t::iterator call_emplace_hint(container_t& c, typename container_t::const_iterator hint, arg_t&& k){
+ typename container_t::value_type * selector = NULL;
+ return call_emplace_hint_impl(c, hint, std::forward<arg_t>(k), selector);
+}
+}
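+
+// A minimal dispatch sketch (Set/Map stand for containers under test):
+// call_emplace() selects its overload through a null value_type* tag, so a
+// std::pair value_type routes to the map overload, which duplicates the key
+// into the mapped value, and any other value_type routes to the set overload:
+//     Set s; emplace_helpers::call_emplace(s, 1); // calls s.emplace(1)
+//     Map m; emplace_helpers::call_emplace(m, 1); // calls m.emplace(1, 1)
+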
+template<typename T, typename do_check_element_state, typename V>
+void test_emplace_insert(V v1, V v2){
+ typedef T container_t;
+ container_t cont;
+
+ std::pair<typename container_t::iterator, bool> ins = emplace_helpers::call_emplace(cont, v1);
+ ASSERT(ins.second == true && Value<container_t>::compare(ins.first, v1), "Element 1 has not been inserted properly");
+ ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::DirectInitialized,*ins.first);
+
+ typename container_t::iterator it2 = emplace_helpers::call_emplace_hint(cont, ins.first, v2);
+ ASSERT(Value<container_t>::compare(it2, v2), "Element 2 has not been inserted properly");
+ ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::DirectInitialized,*it2);
+}
+#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
+
+template<typename ContainerType, typename Iterator, typename RangeType>
+std::pair<intptr_t,intptr_t> CheckRecursiveRange(RangeType range) {
+ std::pair<intptr_t,intptr_t> sum(0, 0); // count, sum
+ for( Iterator i = range.begin(), e = range.end(); i != e; ++i ) {
+ ++sum.first; sum.second += Value<ContainerType>::get(*i);
+ }
+ if( range.is_divisible() ) {
+ RangeType range2( range, tbb::split() );
+ std::pair<intptr_t,intptr_t> sum1 = CheckRecursiveRange<ContainerType,Iterator, RangeType>( range );
+ std::pair<intptr_t,intptr_t> sum2 = CheckRecursiveRange<ContainerType,Iterator, RangeType>( range2 );
+ sum1.first += sum2.first; sum1.second += sum2.second;
+ ASSERT( sum == sum1, "Mismatched ranges after division");
+ }
+ return sum;
+}
+
+template <typename Map>
+void SpecialMapTests( const char *str ){
+ Map cont;
+ const Map &ccont( cont );
+
+ // mapped_type& operator[](const key_type& k);
+ cont[1] = 2;
+
+ // bool empty() const;
+ ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
+
+ // size_type size() const;
+ ASSERT( ccont.size( ) == 1, "Concurrent container size incorrect" );
+ ASSERT( cont[1] == 2, "Concurrent container value incorrect" );
+
+ // mapped_type& at( const key_type& k );
+ // const mapped_type& at(const key_type& k) const;
+ ASSERT( cont.at( 1 ) == 2, "Concurrent container value incorrect" );
+ ASSERT( ccont.at( 1 ) == 2, "Concurrent container value incorrect" );
+
+ // iterator find(const key_type& k);
+ typename Map::iterator it = cont.find( 1 );
+ ASSERT( it != cont.end( ) && Value<Map>::get( *(it) ) == 2, "Element with key 1 not properly found" );
+ cont.unsafe_erase( it );
+
+ it = cont.find( 1 );
+ ASSERT( it == cont.end( ), "Element with key 1 not properly erased" );
+ REMARK( "passed -- specialized %s tests\n", str );
+}
+
+template <typename MultiMap>
+void CheckMultiMap(MultiMap &m, int *targets, int tcount, int key) {
+ std::vector<bool> vfound(tcount,false);
+ std::pair<typename MultiMap::iterator, typename MultiMap::iterator> range = m.equal_range( key );
+ for(typename MultiMap::iterator it = range.first; it != range.second; ++it) {
+ bool found = false;
+ for( int i = 0; i < tcount; ++i) {
+ if((*it).second == targets[i]) {
+ if(!vfound[i]) { // we can insert duplicate values
+ vfound[i] = found = true;
+ break;
+ }
+ }
+ }
+        // catch any unexpected extra value returned by equal_range
+ ASSERT(found, "extra value from equal range");
+ }
+ for(int i = 0; i < tcount; ++i) ASSERT(vfound[i], "missing value");
+}
+
+template <typename MultiMap>
+void SpecialMultiMapTests( const char *str ){
+ int one_values[] = { 7, 2, 13, 23, 13 };
+ int zero_values[] = { 4, 9, 13, 29, 42, 111};
+ int n_zero_values = sizeof(zero_values) / sizeof(int);
+ int n_one_values = sizeof(one_values) / sizeof(int);
+ MultiMap cont;
+ const MultiMap &ccont( cont );
+    // iterator insert(const value_type& x);
+ cont.insert( std::make_pair( 1, one_values[0] ) );
+
+ // bool empty() const;
+ ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
+
+ // size_type size() const;
+ ASSERT( ccont.size( ) == 1, "Concurrent container size incorrect" );
+ ASSERT( (*(cont.begin( ))).second == one_values[0], "Concurrent container value incorrect" );
+ ASSERT( (*(cont.equal_range( 1 )).first).second == one_values[0], "Improper value from equal_range" );
+ ASSERT( (cont.equal_range( 1 )).second == cont.end( ), "Improper iterator from equal_range" );
+
+ cont.insert( std::make_pair( 1, one_values[1] ) );
+
+ // bool empty() const;
+ ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
+
+ // size_type size() const;
+ ASSERT( ccont.size( ) == 2, "Concurrent container size incorrect" );
+ CheckMultiMap(cont, one_values, 2, 1);
+
+ // insert the other {1,x} values
+ for( int i = 2; i < n_one_values; ++i ) {
+ cont.insert( std::make_pair( 1, one_values[i] ) );
+ }
+
+ CheckMultiMap(cont, one_values, n_one_values, 1);
+ ASSERT( (cont.equal_range( 1 )).second == cont.end( ), "Improper iterator from equal_range" );
+
+ cont.insert( std::make_pair( 0, zero_values[0] ) );
+
+ // bool empty() const;
+ ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
+
+ // size_type size() const;
+ ASSERT( ccont.size( ) == (size_t)(n_one_values+1), "Concurrent container size incorrect" );
+ CheckMultiMap(cont, one_values, n_one_values, 1);
+ CheckMultiMap(cont, zero_values, 1, 0);
+ ASSERT( (*(cont.begin( ))).second == zero_values[0], "Concurrent container value incorrect" );
+ // insert the rest of the zero values
+ for( int i = 1; i < n_zero_values; ++i) {
+ cont.insert( std::make_pair( 0, zero_values[i] ) );
+ }
+ CheckMultiMap(cont, one_values, n_one_values, 1);
+ CheckMultiMap(cont, zero_values, n_zero_values, 0);
+
+ // clear, reinsert interleaved
+ cont.clear();
+ int bigger_num = ( n_one_values > n_zero_values ) ? n_one_values : n_zero_values;
+ for( int i = 0; i < bigger_num; ++i ) {
+ if(i < n_one_values) cont.insert( std::make_pair( 1, one_values[i] ) );
+ if(i < n_zero_values) cont.insert( std::make_pair( 0, zero_values[i] ) );
+ }
+ CheckMultiMap(cont, one_values, n_one_values, 1);
+ CheckMultiMap(cont, zero_values, n_zero_values, 0);
+
+
+ REMARK( "passed -- specialized %s tests\n", str );
+}
+
+template <typename T>
+struct SpecialTests {
+ static void Test(const char *str) {REMARK("skipped -- specialized %s tests\n", str);}
+};
+
+
+
+#if __TBB_RANGE_BASED_FOR_PRESENT
+#include "test_range_based_for.h"
+
+template <typename Container>
+void TestRangeBasedFor() {
+ using namespace range_based_for_support_tests;
+
+ REMARK( "testing range based for loop compatibility \n" );
+ Container cont;
+ const int sequence_length = 100;
+ for ( int i = 1; i <= sequence_length; ++i ) {
+ cont.insert( Value<Container>::make(i) );
+ }
+
+ ASSERT( range_based_for_accumulate( cont, unified_summer(), 0 ) ==
+ gauss_summ_of_int_sequence( sequence_length ),
+ "incorrect accumulated value generated via range based for ?" );
+}
+#endif /* __TBB_RANGE_BASED_FOR_PRESENT */
+
+#if __TBB_INITIALIZER_LISTS_PRESENT
+// Required by test_initializer_list.h
+template<typename container_type>
+bool equal_containers(container_type const& lhs, container_type const& rhs) {
+ if ( lhs.size() != rhs.size() ) {
+ return false;
+ }
+ return std::equal( lhs.begin(), lhs.end(), rhs.begin(), Harness::IsEqual() );
+}
+
+#include "test_initializer_list.h"
+
+template <typename Table, typename MultiTable>
+void TestInitList( std::initializer_list<typename Table::value_type> il ) {
+ using namespace initializer_list_support_tests;
+ REMARK("testing initializer_list methods \n");
+
+ TestInitListSupportWithoutAssign<Table,test_special_insert>(il);
+ TestInitListSupportWithoutAssign<MultiTable, test_special_insert>( il );
+
+#if __TBB_ICC_EMPTY_INIT_LIST_TESTS_BROKEN
+ REPORT( "Known issue: TestInitListSupportWithoutAssign with an empty initializer list is skipped.\n");
+#else
+ TestInitListSupportWithoutAssign<Table, test_special_insert>( {} );
+ TestInitListSupportWithoutAssign<MultiTable, test_special_insert>( {} );
+#endif
+}
+#endif //if __TBB_INITIALIZER_LISTS_PRESENT
+
+template<typename T, typename do_check_element_state>
+void test_basic_common(const char * str, do_check_element_state)
+{
+ T cont;
+ const T &ccont(cont);
+ CheckEmptyContainerAllocatorE(cont, 1, 0); // one dummy is always allocated
+ // bool empty() const;
+ ASSERT(ccont.empty(), "Concurrent container is not empty after construction");
+
+ // size_type size() const;
+ ASSERT(ccont.size() == 0, "Concurrent container is not empty after construction");
+
+ // size_type max_size() const;
+ ASSERT(ccont.max_size() > 0, "Concurrent container max size is invalid");
+
+ //iterator begin();
+ //iterator end();
+ ASSERT(cont.begin() == cont.end(), "Concurrent container iterators are invalid after construction");
+ ASSERT(ccont.begin() == ccont.end(), "Concurrent container iterators are invalid after construction");
+ ASSERT(cont.cbegin() == cont.cend(), "Concurrent container iterators are invalid after construction");
+
+ //std::pair<iterator, bool> insert(const value_type& obj);
+ std::pair<typename T::iterator, bool> ins = cont.insert(Value<T>::make(1));
+ ASSERT(ins.second == true && Value<T>::get(*(ins.first)) == 1, "Element 1 has not been inserted properly");
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ test_rvalue_insert<T,do_check_element_state>(1,2);
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ test_emplace_insert<T,do_check_element_state>(1,2);
+#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
+
+ // bool empty() const;
+ ASSERT(!ccont.empty(), "Concurrent container is empty after adding an element");
+
+ // size_type size() const;
+ ASSERT(ccont.size() == 1, "Concurrent container size is incorrect");
+
+ std::pair<typename T::iterator, bool> ins2 = cont.insert(Value<T>::make(1));
+
+ if (T::allow_multimapping)
+ {
+ // std::pair<iterator, bool> insert(const value_type& obj);
+ ASSERT(ins2.second == true && Value<T>::get(*(ins2.first)) == 1, "Element 1 has not been inserted properly");
+
+ // size_type size() const;
+ ASSERT(ccont.size() == 2, "Concurrent container size is incorrect");
+
+ // size_type count(const key_type& k) const;
+ ASSERT(ccont.count(1) == 2, "Concurrent container count(1) is incorrect");
+ // std::pair<iterator, iterator> equal_range(const key_type& k);
+ std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1);
+ typename T::iterator it = range.first;
+ ASSERT(it != cont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
+ unsigned int count = 0;
+ for (; it != range.second; it++)
+ {
+ count++;
+ ASSERT(Value<T>::get(*it) == 1, "Element 1 has not been found properly");
+ }
+
+ ASSERT(count == 2, "Range doesn't have the right number of elements");
+ }
+ else
+ {
+ // std::pair<iterator, bool> insert(const value_type& obj);
+ ASSERT(ins2.second == false && ins2.first == ins.first, "Element 1 should not be re-inserted");
+
+ // size_type size() const;
+ ASSERT(ccont.size() == 1, "Concurrent container size is incorrect");
+
+ // size_type count(const key_type& k) const;
+ ASSERT(ccont.count(1) == 1, "Concurrent container count(1) is incorrect");
+
+ // std::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+ // std::pair<iterator, iterator> equal_range(const key_type& k);
+ std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1);
+ typename T::iterator it = range.first;
+ ASSERT(it != cont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
+ ASSERT(++it == range.second, "Range doesn't have the right number of elements");
+ }
+
+ // const_iterator find(const key_type& k) const;
+ // iterator find(const key_type& k);
+ typename T::iterator it = cont.find(1);
+ ASSERT(it != cont.end() && Value<T>::get(*(it)) == 1, "Element 1 has not been found properly");
+ ASSERT(ccont.find(1) == it, "Element 1 has not been found properly");
+
+ // Will be implemented in unordered containers later
+#if !__TBB_UNORDERED_TEST
+ //bool contains(const key_type&k) const
+    ASSERT(cont.contains(1), "contains() failed to detect an existing element");
+    ASSERT(!cont.contains(0), "contains() detected a non-existing element");
+#endif /*__TBB_UNORDERED_TEST*/
+
+ // iterator insert(const_iterator hint, const value_type& obj);
+ typename T::iterator it2 = cont.insert(ins.first, Value<T>::make(2));
+ ASSERT(Value<T>::get(*it2) == 2, "Element 2 has not been inserted properly");
+
+ // T(const T& _Umap)
+ T newcont = ccont;
+ ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Copy construction has not copied the elements properly");
+
+    // size_type unsafe_erase(const key_type& k);
+ typename T::size_type size = cont.unsafe_erase(1);
+ ASSERT(T::allow_multimapping ? (size == 2) : (size == 1), "Erase has not removed the right number of elements");
+
+ // iterator unsafe_erase(const_iterator position);
+ typename T::iterator it4 = cont.unsafe_erase(cont.find(2));
+ ASSERT(it4 == cont.end() && cont.size() == 0, "Erase has not removed the last element properly");
+
+ // template<class InputIterator> void insert(InputIterator first, InputIterator last);
+ cont.insert(newcont.begin(), newcont.end());
+ ASSERT(T::allow_multimapping ? (cont.size() == 3) : (cont.size() == 2), "Range insert has not copied the elements properly");
+
+    // iterator unsafe_erase(const_iterator first, const_iterator last);
+ std::pair<typename T::iterator, typename T::iterator> range2 = newcont.equal_range(1);
+ newcont.unsafe_erase(range2.first, range2.second);
+ ASSERT(newcont.size() == 1, "Range erase has not erased the elements properly");
+
+ // void clear();
+ newcont.clear();
+ ASSERT(newcont.begin() == newcont.end() && newcont.size() == 0, "Clear has not cleared the container");
+
+#if __TBB_INITIALIZER_LISTS_PRESENT
+#if __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
+ REPORT("Known issue: the test for insert with initializer_list is skipped.\n");
+#else
+ // void insert(const std::initializer_list<value_type> &il);
+ newcont.insert( { Value<T>::make( 1 ), Value<T>::make( 2 ), Value<T>::make( 1 ) } );
+ if (T::allow_multimapping) {
+ ASSERT(newcont.size() == 3, "Concurrent container size is incorrect");
+ ASSERT(newcont.count(1) == 2, "Concurrent container count(1) is incorrect");
+ ASSERT(newcont.count(2) == 1, "Concurrent container count(2) is incorrect");
+        std::pair<typename T::iterator, typename T::iterator> range = newcont.equal_range(1);
+ it = range.first;
+ ASSERT(it != newcont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
+ unsigned int count = 0;
+ for (; it != range.second; it++) {
+ count++;
+ ASSERT(Value<T>::get(*it) == 1, "Element 1 has not been found properly");
+ }
+ ASSERT(count == 2, "Range doesn't have the right number of elements");
+ range = newcont.equal_range(2); it = range.first;
+ ASSERT(it != newcont.end() && Value<T>::get(*it) == 2, "Element 2 has not been found properly");
+ count = 0;
+ for (; it != range.second; it++) {
+ count++;
+ ASSERT(Value<T>::get(*it) == 2, "Element 2 has not been found properly");
+ }
+ ASSERT(count == 1, "Range doesn't have the right number of elements");
+ } else {
+ ASSERT(newcont.size() == 2, "Concurrent container size is incorrect");
+ ASSERT(newcont.count(1) == 1, "Concurrent container count(1) is incorrect");
+ ASSERT(newcont.count(2) == 1, "Concurrent container count(2) is incorrect");
+ std::pair<typename T::iterator, typename T::iterator> range = newcont.equal_range(1);
+ it = range.first;
+ ASSERT(it != newcont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
+ ASSERT(++it == range.second, "Range doesn't have the right number of elements");
+ range = newcont.equal_range(2); it = range.first;
+ ASSERT(it != newcont.end() && Value<T>::get(*it) == 2, "Element 2 has not been found properly");
+ ASSERT(++it == range.second, "Range doesn't have the right number of elements");
+ }
+#endif /* __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN */
+#endif /* __TBB_INITIALIZER_LISTS_PRESENT */
+
+ // T& operator=(const T& _Umap)
+ newcont = ccont;
+ ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Assignment operator has not copied the elements properly");
+
+ REMARK("passed -- basic %s tests\n", str);
+
+#if defined (VERBOSE)
+ REMARK("container dump debug:\n");
+ cont._Dump();
+ REMARK("container dump release:\n");
+ cont.dump();
+ REMARK("\n");
+#endif
+
+ cont.clear();
+ CheckEmptyContainerAllocatorA(cont, 1, 0); // one dummy is always allocated
+ for (int i = 0; i < 256; i++)
+ {
+ std::pair<typename T::iterator, bool> ins3 = cont.insert(Value<T>::make(i));
+        ASSERT(ins3.second == true && Value<T>::get(*(ins3.first)) == i, "Element has not been inserted properly");
+ }
+ ASSERT(cont.size() == 256, "Wrong number of elements have been inserted");
+ ASSERT((256 == CheckRecursiveRange<T,typename T::iterator>(cont.range()).first), NULL);
+ ASSERT((256 == CheckRecursiveRange<T,typename T::const_iterator>(ccont.range()).first), NULL);
+
+ // void swap(T&);
+ cont.swap(newcont);
+ ASSERT(newcont.size() == 256, "Wrong number of elements after swap");
+ ASSERT(newcont.count(200) == 1, "Element with key 200 is not present after swap");
+ ASSERT(newcont.count(16) == 1, "Element with key 16 is not present after swap");
+ ASSERT(newcont.count(99) == 1, "Element with key 99 is not present after swap");
+    ASSERT(T::allow_multimapping ? (cont.size() == 3) : (cont.size() == 2), "Wrong number of elements in the container after swap");
+
+    // Run container-specific tests (a no-op unless SpecialTests is specialized)
+ SpecialTests<T>::Test(str);
+}
+
+template<typename T>
+void test_basic_common(const char * str){
+ test_basic_common<T>(str, tbb::internal::false_type());
+}
+
+void test_machine() {
+ ASSERT(__TBB_ReverseByte(0)==0, NULL );
+ ASSERT(__TBB_ReverseByte(1)==0x80, NULL );
+ ASSERT(__TBB_ReverseByte(0xFE)==0x7F, NULL );
+ ASSERT(__TBB_ReverseByte(0xFF)==0xFF, NULL );
+}
+
+template<typename T>
+class FillTable: NoAssign {
+ T &table;
+ const int items;
+ bool my_asymptotic;
+ typedef std::pair<typename T::iterator, bool> pairIB;
+public:
+ FillTable(T &t, int i, bool asymptotic) : table(t), items(i), my_asymptotic(asymptotic) {
+ ASSERT( !(items&1) && items > 100, NULL);
+ }
+ void operator()(int threadn) const {
+ if( threadn == 0 ) { // Fill even keys forward (single thread)
+ bool last_inserted = true;
+ for( int i = 0; i < items; i+=2 ) {
+ pairIB pib = table.insert(Value<T>::make(my_asymptotic?1:i));
+ ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic?1:i), "Element not properly inserted");
+            ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this one was" );
+ last_inserted = pib.second;
+ }
+ } else if( threadn == 1 ) { // Fill even keys backward (single thread)
+ bool last_inserted = true;
+ for( int i = items-2; i >= 0; i-=2 ) {
+ pairIB pib = table.insert(Value<T>::make(my_asymptotic?1:i));
+ ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic?1:i), "Element not properly inserted");
+            ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this one was" );
+ last_inserted = pib.second;
+ }
+ } else if( !(threadn&1) ) { // Fill odd keys forward (multiple threads)
+ for( int i = 1; i < items; i+=2 )
+#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
+ if ( i % 32 == 1 && i + 6 < items ) {
+ if (my_asymptotic) {
+ table.insert({ Value<T>::make(1), Value<T>::make(1), Value<T>::make(1) });
+ ASSERT(Value<T>::get(*table.find(1)) == 1, "Element not properly inserted");
+ }
+ else {
+ table.insert({ Value<T>::make(i), Value<T>::make(i + 2), Value<T>::make(i + 4) });
+ ASSERT(Value<T>::get(*table.find(i)) == i, "Element not properly inserted");
+ ASSERT(Value<T>::get(*table.find(i + 2)) == i + 2, "Element not properly inserted");
+ ASSERT(Value<T>::get(*table.find(i + 4)) == i + 4, "Element not properly inserted");
+ }
+ i += 4;
+ } else
+#endif
+ {
+ pairIB pib = table.insert(Value<T>::make(my_asymptotic ? 1 : i));
+ ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic ? 1 : i), "Element not properly inserted");
+ }
+ } else { // Check odd keys backward (multiple threads)
+ if (!my_asymptotic) {
+ bool last_found = false;
+ for( int i = items-1; i >= 0; i-=2 ) {
+ typename T::iterator it = table.find(i);
+ if( it != table.end() ) { // found
+ ASSERT(Value<T>::get(*it) == i, "Element not properly inserted");
+ last_found = true;
+ } else {
+                    ASSERT( !last_found, "Previous key was found but this one is not" );
+ }
+ }
+ }
+ }
+ }
+};
+
+typedef tbb::atomic<unsigned char> AtomicByte;
+
+template<typename ContainerType, typename RangeType>
+struct ParallelTraverseBody: NoAssign {
+ const int n;
+ AtomicByte* const array;
+ ParallelTraverseBody( AtomicByte an_array[], int a_n ) :
+ n(a_n), array(an_array)
+ {}
+ void operator()( const RangeType& range ) const {
+ for( typename RangeType::iterator i = range.begin(); i!=range.end(); ++i ) {
+ int k = static_cast<int>(Value<ContainerType>::key(*i));
+ ASSERT( k == Value<ContainerType>::get(*i), NULL );
+ ASSERT( 0<=k && k<n, NULL );
+ array[k]++;
+ }
+ }
+};
+
+// if multimapping, oddCount is the value that each odd-indexed array element should have.
+// not meaningful for non-multimapped case.
+void CheckRange( AtomicByte array[], int n, bool allowMultiMapping, int oddCount ) {
+ if(allowMultiMapping) {
+ for( int k = 0; k<n; ++k) {
+ if(k%2) {
+ if( array[k] != oddCount ) {
+ REPORT("array[%d]=%d (should be %d)\n", k, int(array[k]), oddCount);
+ ASSERT(false,NULL);
+ }
+ }
+ else {
+ if(array[k] != 2) {
+ REPORT("array[%d]=%d\n", k, int(array[k]));
+ ASSERT(false,NULL);
+ }
+ }
+ }
+ }
+ else {
+ for( int k=0; k<n; ++k ) {
+ if( array[k] != 1 ) {
+ REPORT("array[%d]=%d\n", k, int(array[k]));
+ ASSERT(false,NULL);
+ }
+ }
+ }
+}
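+
+// Worked example (assuming the nThreads == 16 used below on most platforms):
+// with multimapping, each even key is inserted once by thread 0 and once by
+// thread 1, so array[even] == 2; each odd key is inserted by every
+// even-numbered thread > 1 (threads 2, 4, ..., 14 -- seven of them), so the
+// expected oddCount is (nThreads - 1)/2 == 7.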
+
+template<typename T>
+class CheckTable: NoAssign {
+ T &table;
+public:
+ CheckTable(T &t) : NoAssign(), table(t) {}
+ void operator()(int i) const {
+ int c = (int)table.count( i );
+ ASSERT( c, "must exist" );
+ }
+};
+
+template<typename T>
+void test_concurrent_common(const char *tablename, bool asymptotic = false) {
+#if TBB_USE_ASSERT
+ int items = 2000;
+#else
+ int items = 20000;
+#endif
+ int nItemsInserted = 0;
+ int nThreads = 0;
+#if __TBB_UNORDERED_TEST
+ T table(items/1000);
+#else
+ T table;
+#endif
+ #if __bgp__
+ nThreads = 6;
+ #else
+ nThreads = 16;
+ #endif
+ if(T::allow_multimapping) {
+        // Threads 0 & 1 each insert the N/2 even keys.
+        // Of the threads > 1, the even-numbered ones each insert the N/2 odd keys; the odd-numbered ones check them.
+ items = 4*items / (nThreads + 2); // approximately same number of items inserted.
+ nItemsInserted = items + (nThreads-2) * items / 4;
+ }
+ else {
+ nItemsInserted = items;
+ }
+ REMARK("%s items == %d\n", tablename, items);
+ tbb::tick_count t0 = tbb::tick_count::now();
+ NativeParallelFor( nThreads, FillTable<T>(table, items, asymptotic) );
+ tbb::tick_count t1 = tbb::tick_count::now();
+ REMARK( "time for filling '%s' by %d items = %g\n", tablename, table.size(), (t1-t0).seconds() );
+ ASSERT( int(table.size()) == nItemsInserted, NULL);
+
+ if(!asymptotic) {
+ AtomicByte* array = new AtomicByte[items];
+ memset( static_cast<void*>(array), 0, items*sizeof(AtomicByte) );
+
+ typename T::range_type r = table.range();
+ std::pair<intptr_t,intptr_t> p = CheckRecursiveRange<T,typename T::iterator>(r);
+ ASSERT((nItemsInserted == p.first), NULL);
+ tbb::parallel_for( r, ParallelTraverseBody<T, typename T::const_range_type>( array, items ));
+ CheckRange( array, items, T::allow_multimapping, (nThreads - 1)/2 );
+
+ const T &const_table = table;
+ memset( static_cast<void*>(array), 0, items*sizeof(AtomicByte) );
+ typename T::const_range_type cr = const_table.range();
+ ASSERT((nItemsInserted == CheckRecursiveRange<T,typename T::const_iterator>(cr).first), NULL);
+ tbb::parallel_for( cr, ParallelTraverseBody<T, typename T::const_range_type>( array, items ));
+ CheckRange( array, items, T::allow_multimapping, (nThreads - 1) / 2 );
+ delete[] array;
+
+ tbb::parallel_for( 0, items, CheckTable<T>( table ) );
+ }
+
+ table.clear();
+ CheckEmptyContainerAllocatorA(table, items+1, items); // one dummy is always allocated
+
+}
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+#include "test_container_move_support.h"
+
+template<typename container_traits>
+void test_rvalue_ref_support(const char* container_name){
+ TestMoveConstructor<container_traits>();
+ TestMoveAssignOperator<container_traits>();
+#if TBB_USE_EXCEPTIONS
+ TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure<container_traits>();
+ TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor<container_traits>();
+#endif //TBB_USE_EXCEPTIONS
+ REMARK("passed -- %s move support tests\n", container_name);
+}
+#endif //__TBB_CPP11_RVALUE_REF_PRESENT
+
+namespace test_select_size_t_constant{
+ __TBB_STATIC_ASSERT((tbb::internal::select_size_t_constant<1234,1234>::value == 1234),"select_size_t_constant::value is not compile time constant");
+// Two constants are used in the test: a 32-bit one and a 64-bit one.
+// The 64-bit constant is chosen so that its 32-bit halves add up to the 32-bit one (the first constant used in the test).
+// "% ~0U" is used to sum up the 32-bit halves of the 64-bit constant: it essentially adds the 32-bit "digits", like "% 9"
+// adds the digits (modulo 9) of a number in base 10.
+// So iff select_size_t_constant is correct, the result of the calculation below will be the same on both 32-bit and 64-bit platforms.
+ __TBB_STATIC_ASSERT((tbb::internal::select_size_t_constant<0x12345678U,0x091A2B3C091A2B3CULL>::value % ~0U == 0x12345678U),
+ "select_size_t_constant have chosen the wrong constant");
+}
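+
+// Worked example for the check above (assuming a 32-bit unsigned int): the
+// 32-bit halves of 0x091A2B3C091A2B3CULL are both 0x091A2B3C, and
+// 0x091A2B3C + 0x091A2B3C == 0x12345678. Since 2^32 % (2^32 - 1) == 1,
+// "% ~0U" folds the halves together: on a 64-bit platform
+// 0x091A2B3C091A2B3CULL % 0xFFFFFFFFu == 0x12345678, and on a 32-bit
+// platform 0x12345678U % 0xFFFFFFFFu == 0x12345678 as well.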
+
+#if __TBB_CPP11_SMART_POINTERS_PRESENT
+// For the sake of simplified testing, make unique_ptr implicitly convertible to/from the pointer
+namespace test {
+ template<typename T>
+ class unique_ptr : public std::unique_ptr<T> {
+ public:
+ typedef typename std::unique_ptr<T>::pointer pointer;
+ unique_ptr( pointer p ) : std::unique_ptr<T>(p) {}
+ operator pointer() const { return this->get(); }
+ };
+}
+#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
+
+#include <vector>
+#include <list>
+#include <algorithm>
+
+template <typename ValueType>
+class TestRange : NoAssign {
+ const std::list<ValueType> &my_lst;
+ std::vector< tbb::atomic<bool> > &my_marks;
+public:
+ TestRange( const std::list<ValueType> &lst, std::vector< tbb::atomic<bool> > &marks ) : my_lst( lst ), my_marks( marks ) {
+ std::fill( my_marks.begin(), my_marks.end(), false );
+ }
+ template <typename Range>
+ void operator()( const Range &r ) const { doTestRange( r.begin(), r.end() ); }
+ template<typename Iterator>
+ void doTestRange( Iterator i, Iterator j ) const {
+ for ( Iterator it = i; it != j; ) {
+ Iterator prev_it = it++;
+ typename std::list<ValueType>::const_iterator it2 = std::search( my_lst.begin(), my_lst.end(), prev_it, it, Harness::IsEqual() );
+ ASSERT( it2 != my_lst.end(), NULL );
+ typename std::list<ValueType>::difference_type dist = std::distance( my_lst.begin( ), it2 );
+ ASSERT( !my_marks[dist], NULL );
+ my_marks[dist] = true;
+ }
+ }
+};
+
+// A helper to call a function only when doCall == true.
+template <bool doCall> struct CallIf {
+ template<typename FuncType> void operator() ( FuncType func ) const { func(); }
+};
+template <> struct CallIf<false> {
+ template<typename FuncType> void operator()( FuncType ) const {}
+};
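+
+// A minimal usage sketch: the functor object is constructed unconditionally,
+// but its call is compiled only in the doCall == true specialization, so
+// operations that do not compile for some table types can be skipped, as in
+// TestMapSpecificMethodsImpl below:
+//     CallIf<defCtorPresent>()(TestOperatorSquareBrackets<Table>(c, value));
+//     CallIf<false>()(f); // no-op: f is accepted but never invoked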
+
+template <typename Table>
+class TestOperatorSquareBrackets : NoAssign {
+ typedef typename Table::value_type ValueType;
+ Table &my_c;
+ const ValueType &my_value;
+public:
+ TestOperatorSquareBrackets( Table &c, const ValueType &value ) : my_c( c ), my_value( value ) {}
+ void operator()() const {
+ ASSERT( Harness::IsEqual()(my_c[my_value.first], my_value.second), NULL );
+ }
+};
+
+template <bool defCtorPresent, typename Table, typename Value>
+void TestMapSpecificMethodsImpl(Table &c, const Value &value){
+ CallIf<defCtorPresent>()(TestOperatorSquareBrackets<Table>( c, value ));
+ ASSERT( Harness::IsEqual()(c.at( value.first ), value.second), NULL );
+ const Table &constC = c;
+ ASSERT( Harness::IsEqual()(constC.at( value.first ), value.second), NULL );
+}
+
+// do nothing for common case
+template <bool defCtorPresent, typename Table, typename Value>
+void TestMapSpecificMethods( Table&, const Value& ) {}
+
+template <bool defCtorPresent, typename Table>
+class CheckValue : NoAssign {
+ Table &my_c;
+public:
+ CheckValue( Table &c ) : my_c( c ) {}
+ void operator()( const typename Table::value_type &value ) {
+ typedef typename Table::iterator Iterator;
+ typedef typename Table::const_iterator ConstIterator;
+ const Table &constC = my_c;
+ ASSERT( my_c.count( Value<Table>::key( value ) ) == 1, NULL );
+ // find
+ ASSERT( Harness::IsEqual()(*my_c.find( Value<Table>::key( value ) ), value), NULL );
+ ASSERT( Harness::IsEqual()(*constC.find( Value<Table>::key( value ) ), value), NULL );
+ // erase
+ ASSERT( my_c.unsafe_erase( Value<Table>::key( value ) ), NULL );
+ ASSERT( my_c.count( Value<Table>::key( value ) ) == 0, NULL );
+ // insert
+ std::pair<Iterator, bool> res = my_c.insert( value );
+ ASSERT( Harness::IsEqual()(*res.first, value), NULL );
+ ASSERT( res.second, NULL);
+ // erase
+ Iterator it = res.first;
+ it++;
+ ASSERT( my_c.unsafe_erase( res.first ) == it, NULL );
+ // insert
+ ASSERT( Harness::IsEqual()(*my_c.insert( my_c.begin(), value ), value), NULL );
+ // equal_range
+ std::pair<Iterator, Iterator> r1 = my_c.equal_range( Value<Table>::key( value ) );
+ ASSERT( Harness::IsEqual()(*r1.first, value) && ++r1.first == r1.second, NULL );
+ std::pair<ConstIterator, ConstIterator> r2 = constC.equal_range( Value<Table>::key( value ) );
+ ASSERT( Harness::IsEqual()(*r2.first, value) && ++r2.first == r2.second, NULL );
+
+ TestMapSpecificMethods<defCtorPresent>( my_c, value );
+ }
+};
+
+#include "tbb/task_scheduler_init.h"
+
+template <bool defCtorPresent, typename Table>
+void CommonExamine( Table c, const std::list<typename Table::value_type> lst) {
+ typedef typename Table::value_type ValueType;
+
+ ASSERT( !c.empty() && c.size() == lst.size() && c.max_size() >= c.size(), NULL );
+
+ std::for_each( lst.begin(), lst.end(), CheckValue<defCtorPresent, Table>( c ) );
+
+ std::vector< tbb::atomic<bool> > marks( lst.size() );
+
+ TestRange<ValueType>( lst, marks ).doTestRange( c.begin(), c.end() );
+ ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
+
+ TestRange<ValueType>( lst, marks ).doTestRange( c.begin(), c.end() );
+ ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
+
+ const Table constC = c;
+ ASSERT( c.size() == constC.size(), NULL );
+
+ TestRange<ValueType>( lst, marks ).doTestRange( constC.cbegin(), constC.cend() );
+ ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
+
+ tbb::task_scheduler_init init;
+
+ tbb::parallel_for( c.range(), TestRange<ValueType>( lst, marks ) );
+ ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
+
+ tbb::parallel_for( constC.range( ), TestRange<ValueType>( lst, marks ) );
+ ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
+
+ Table c2;
+ typename std::list<ValueType>::const_iterator begin5 = lst.begin();
+ std::advance( begin5, 5 );
+ c2.insert( lst.begin(), begin5 );
+ std::for_each( lst.begin(), begin5, CheckValue<defCtorPresent, Table>( c2 ) );
+
+ c2.swap( c );
+ ASSERT( c2.size() == lst.size(), NULL );
+ ASSERT( c.size() == 5, NULL );
+ std::for_each( lst.begin(), lst.end(), CheckValue<defCtorPresent, Table>( c2 ) );
+
+ c2.clear();
+ ASSERT( c2.size() == 0, NULL );
+
+ typename Table::allocator_type a = c.get_allocator();
+ ValueType *ptr = a.allocate( 1 );
+ ASSERT( ptr, NULL );
+ a.deallocate( ptr, 1 );
+}
+
+// Check the common value types for set and multiset
+template <typename Checker>
+void TestSetCommonTypes() {
+ Checker CheckTypes;
+ const int NUMBER = 10;
+
+ std::list<int> arrInt;
+ for ( int i = 0; i<NUMBER; ++i ) arrInt.push_back( i );
+ CheckTypes.template check</*defCtorPresent = */true>( arrInt );
+
+ std::list< tbb::atomic<int> > arrTbb(NUMBER);
+ int seq = 0;
+ for ( std::list< tbb::atomic<int> >::iterator it = arrTbb.begin(); it != arrTbb.end(); ++it, ++seq ) *it = seq;
+ CheckTypes.template check</*defCtorPresent = */true>( arrTbb );
+
+#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN
+ std::list< std::reference_wrapper<int> > arrRef;
+ for ( std::list<int>::iterator it = arrInt.begin( ); it != arrInt.end( ); ++it )
+ arrRef.push_back( std::reference_wrapper<int>(*it) );
+ CheckTypes.template check</*defCtorPresent = */false>( arrRef );
+#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN */
+
+#if __TBB_CPP11_SMART_POINTERS_PRESENT
+ std::list< std::shared_ptr<int> > arrShr;
+ for ( int i = 0; i<NUMBER; ++i ) arrShr.push_back( std::make_shared<int>( i ) );
+ CheckTypes.template check</*defCtorPresent = */true>( arrShr );
+
+ std::list< std::weak_ptr<int> > arrWk;
+ std::copy( arrShr.begin( ), arrShr.end( ), std::back_inserter( arrWk ) );
+ CheckTypes.template check</*defCtorPresent = */true>( arrWk );
+#else
+ REPORT( "Known issue: C++11 smart pointer tests are skipped.\n" );
+#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
+}
+
+template <typename Checker>
+void TestMapCommonTypes() {
+ Checker CheckTypes;
+ const int NUMBER = 10;
+
+ std::list< std::pair<const int, int> > arrIntInt;
+ for ( int i = 0; i < NUMBER; ++i ) arrIntInt.push_back( std::make_pair( i, NUMBER - i ) );
+    CheckTypes.template check</*defCtorPresent = */true>( arrIntInt );
+
+ std::list< std::pair< const int, tbb::atomic<int> > > arrIntTbb;
+ for ( int i = 0; i < NUMBER; ++i ) {
+ tbb::atomic<int> b;
+ b = NUMBER - i;
+ arrIntTbb.push_back( std::make_pair( i, b ) );
+ }
+ CheckTypes.template check</*defCtorPresent = */true>( arrIntTbb );
+
+#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN
+ std::list< std::pair<const std::reference_wrapper<const int>, int> > arrRefInt;
+ for ( std::list< std::pair<const int, int> >::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it )
+ arrRefInt.push_back( std::make_pair( std::reference_wrapper<const int>( it->first ), it->second ) );
+ CheckTypes.template check</*defCtorPresent = */true>( arrRefInt );
+
+ std::list< std::pair<const int, std::reference_wrapper<int> > > arrIntRef;
+ for ( std::list< std::pair<const int, int> >::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it ) {
+ // Using std::make_pair below causes compilation issues with early implementations of std::reference_wrapper.
+ arrIntRef.push_back( std::pair<const int, std::reference_wrapper<int> >( it->first, std::reference_wrapper<int>( it->second ) ) );
+ }
+ CheckTypes.template check</*defCtorPresent = */false>( arrIntRef );
+#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN */
+
+#if __TBB_CPP11_SMART_POINTERS_PRESENT
+ std::list< std::pair< const std::shared_ptr<int>, std::shared_ptr<int> > > arrShrShr;
+ for ( int i = 0; i < NUMBER; ++i ) {
+ const int NUMBER_minus_i = NUMBER - i;
+ arrShrShr.push_back( std::make_pair( std::make_shared<int>( i ), std::make_shared<int>( NUMBER_minus_i ) ) );
+ }
+ CheckTypes.template check</*defCtorPresent = */true>( arrShrShr );
+
+ std::list< std::pair< const std::weak_ptr<int>, std::weak_ptr<int> > > arrWkWk;
+ std::copy( arrShrShr.begin(), arrShrShr.end(), std::back_inserter( arrWkWk ) );
+ CheckTypes.template check</*defCtorPresent = */true>( arrWkWk );
+
+#else
+ REPORT( "Known issue: C++11 smart pointer tests are skipped.\n" );
+#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
+}
+
+
+#if __TBB_UNORDERED_NODE_HANDLE_PRESENT || __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+namespace node_handling{
+ template<typename Handle>
+ bool compare_handle_getters(
+ const Handle& node, const std::pair<typename Handle::key_type, typename Handle::mapped_type>& expected
+ ) {
+ return node.key() == expected.first && node.mapped() == expected.second;
+ }
+
+ template<typename Handle>
+ bool compare_handle_getters( const Handle& node, const typename Handle::value_type& value) {
+ return node.value() == value;
+ }
+
+ template<typename Handle>
+ void set_node_handle_value(
+ Handle& node, const std::pair<typename Handle::key_type, typename Handle::mapped_type>& value
+ ) {
+ node.key() = value.first;
+ node.mapped() = value.second;
+ }
+
+ template<typename Handle>
+ void set_node_handle_value( Handle& node, const typename Handle::value_type& value) {
+ node.value() = value;
+ }
+
+ template <typename node_type>
+ void TestTraits() {
+        ASSERT( !std::is_copy_constructible<node_type>::value,
+                "Node handle: Handle is copy constructible" );
+        ASSERT( !std::is_copy_assignable<node_type>::value,
+                "Node handle: Handle is copy assignable" );
+        ASSERT( std::is_move_constructible<node_type>::value,
+                "Node handle: Handle is not move constructible" );
+        ASSERT( std::is_move_assignable<node_type>::value,
+                "Node handle: Handle is not move assignable" );
+        ASSERT( std::is_default_constructible<node_type>::value,
+                "Node handle: Handle is not default constructible" );
+        ASSERT( std::is_destructible<node_type>::value,
+                "Node handle: Handle is not destructible" );
+ }
+
+ template <typename Table>
+ void TestHandle( Table test_table ) {
+        ASSERT( test_table.size()>1, "Node handle: Container must contain 2 or more elements" );
+ // Initialization
+ using node_type = typename Table::node_type;
+
+ TestTraits<node_type>();
+
+ // Default Ctor and empty function
+ node_type nh;
+ ASSERT( nh.empty(), "Node handle: Node is not empty after initialization" );
+
+ // Move Assign
+ // key/mapped/value function
+ auto expected_value = *test_table.begin();
+
+ nh = test_table.unsafe_extract(test_table.begin());
+ ASSERT( !nh.empty(), "Node handle: Node handle is empty after valid move assigning" );
+ ASSERT( compare_handle_getters(nh,expected_value),
+ "Node handle: After valid move assigning "
+ "node handle does not contains expected value");
+
+ // Move Ctor
+ // key/mapped/value function
+ node_type nh2(std::move(nh));
+        ASSERT( nh.empty(), "Node handle: Source node handle is not empty after valid move construction" );
+ ASSERT( !nh2.empty(), "Node handle: After valid move construction "
+ "argument hode handle was not moved" );
+ ASSERT( compare_handle_getters(nh2,expected_value),
+ "Node handle: After valid move construction "
+ "node handle does not contains expected value" );
+
+ // Bool conversion
+ ASSERT( nh2, "Node hanlde: Wrong not handle bool conversion" );
+
+ // Change key/mapped/value of node handle
+ auto expected_value2 = *test_table.begin();
+ set_node_handle_value(nh2, expected_value2);
+ ASSERT( compare_handle_getters(nh2, expected_value2),
+ "Node handle: Wrong node handle key/mapped/value changing behavior" );
+
+ // Member/non member swap check
+ node_type empty_node;
+        // Extract an element so that nh2 and nh3 hold different values
+ test_table.unsafe_extract(test_table.begin());
+ auto expected_value3 = *test_table.begin();
+ node_type nh3(test_table.unsafe_extract(test_table.begin()));
+
+        // Both node handles are non-empty
+ nh3.swap(nh2);
+ ASSERT( compare_handle_getters(nh3, expected_value2),
+ "Node handle: Wrong node handle swap behavior" );
+ ASSERT( compare_handle_getters(nh2, expected_value3),
+ "Node handle: Wrong node handle swap behavior" );
+
+ std::swap(nh2,nh3);
+ ASSERT( compare_handle_getters(nh3, expected_value3),
+ "Node handle: Wrong node handle swap behavior" );
+ ASSERT( compare_handle_getters(nh2, expected_value2),
+ "Node handle: Wrong node handle swap behavior" );
+ ASSERT( !nh2.empty(), "Node handle: Wrong node handle swap behavior" );
+ ASSERT( !nh3.empty(), "Node handle: Wrong node handle swap behavior" );
+
+        // One of the nodes is empty
+ nh3.swap(empty_node);
+ ASSERT( compare_handle_getters(std::move(empty_node), expected_value3),
+ "Node handle: Wrong node handle swap behavior" );
+ ASSERT( nh3.empty(), "Node handle: Wrong node handle swap behavior" );
+
+ std::swap(empty_node, nh3);
+ ASSERT( compare_handle_getters(std::move(nh3), expected_value3),
+ "Node handle: Wrong node handle swap behavior" );
+ ASSERT( empty_node.empty(), "Node handle: Wrong node handle swap behavior" );
+
+ empty_node.swap(nh3);
+ ASSERT( compare_handle_getters(std::move(empty_node), expected_value3),
+ "Node handle: Wrong node handle swap behavior" );
+ ASSERT( nh3.empty(), "Node handle: Wrong node handle swap behavior" );
+ }
+
+ template <typename Table>
+ typename Table::node_type GenerateNodeHandle(const typename Table::value_type& value) {
+ Table temp_table;
+ temp_table.insert(value);
+ return temp_table.unsafe_extract(temp_table.cbegin());
+ }
+
+ template <typename Table>
+ void IteratorAssertion( const Table& table,
+ const typename Table::iterator& result,
+ const typename Table::value_type* node_value = nullptr ) {
+ if (node_value==nullptr) {
+            ASSERT( result==table.end(), "Insert: Result iterator is not "
+                    "end() after empty node insertion" );
+ } else {
+ if (!Table::allow_multimapping) {
+ ASSERT( result==table.find(Value<Table>::key( *node_value )) &&
+ result != table.end(),
+ "Insert: After node insertion result iterator"
+ " doesn't contains address to equal element in table" );
+ } else {
+                ASSERT( *result==*node_value, "Insert: Result iterator points to"
+                        " wrong content after successful insertion" );
+
+ for (auto it = table.begin(); it != table.end(); ++it) {
+ if (it == result) return;
+ }
+ ASSERT( false, "Insert: After successful insertion result "
+ "iterator contains address that is not in the table" );
+ }
+ }
+ }
+    // Overload for a multitable or for insertion with a hint iterator
+ template <typename Table>
+ void InsertAssertion( const Table& table,
+ const typename Table::iterator& result,
+ bool,
+ const typename Table::value_type* node_value = nullptr ) {
+ IteratorAssertion(table, result, node_value);
+ }
+
+    // Overload for a non-multitable insertion without a hint
+ template <typename Table>
+ void InsertAssertion( const Table& table,
+ const std::pair<typename Table::iterator, bool>& result,
+ bool second_value,
+ const typename Table::value_type* node_value = nullptr ) {
+ IteratorAssertion(table, result.first, node_value);
+
+ ASSERT( result.second == second_value || Table::allow_multimapping,
+ "Insert: Returned bool wrong value after node insertion" );
+ }
+
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+    // Internal function for testing.
+    // The "Table" argument must stay a reference because the hint must point to an element of that table.
+ namespace {
+ template <typename Table, typename... Hint>
+ void TestInsertOverloads( Table& table_to_insert,
+ const typename Table::value_type &value, const Hint&... hint ) {
+ // Insert empty element
+ typename Table::node_type nh;
+
+ auto table_size = table_to_insert.size();
+ auto result = table_to_insert.insert(hint..., std::move(nh));
+ InsertAssertion(table_to_insert, result, /*second_value*/ false);
+ ASSERT( table_to_insert.size() == table_size,
+ "Insert: After empty node insertion table size changed" );
+
+            // Standard insertion
+ nh = GenerateNodeHandle<Table>(value);
+
+ result = table_to_insert.insert(hint..., std::move(nh));
+ ASSERT( nh.empty(), "Insert: Not empty handle after successful insertion" );
+ InsertAssertion(table_to_insert, result, /*second_value*/ true, &value);
+
+ // Insert existing node
+ nh = GenerateNodeHandle<Table>(value);
+
+ result = table_to_insert.insert(hint..., std::move(nh));
+
+ InsertAssertion(table_to_insert, result, /*second_value*/ false, &value);
+
+ if (Table::allow_multimapping){
+ ASSERT( nh.empty(), "Insert: Failed insertion to multitable" );
+ } else {
+ ASSERT( !nh.empty() , "Insert: Empty handle after failed insertion" );
+ ASSERT( compare_handle_getters( std::move(nh), value ),
+ "Insert: Existing data does not equal to the one being inserted" );
+ }
+ }
+ }
+
+ template <typename Table>
+ void TestInsert( Table table, const typename Table::value_type & value) {
+ ASSERT( !table.empty(), "Insert: Map should contains 1 or more elements" );
+ Table table_backup(table);
+ TestInsertOverloads(table, value);
+ TestInsertOverloads(table_backup, value, table_backup.begin());
+ }
+#endif /*__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT*/
+
+ template <typename Table>
+ void TestExtract( Table table_for_extract, typename Table::key_type new_key ) {
+        ASSERT( table_for_extract.size()>1, "Extract: Container must contain 2 or more elements" );
+        ASSERT( table_for_extract.find(new_key)==table_for_extract.end(),
+                "Extract: Table must not contain the new element!");
+
+        // Extract by a key that is not in the table
+ auto nh = table_for_extract.unsafe_extract(new_key);
+ ASSERT( nh.empty(), "Extract: Node handle is not empty after wrong key extraction" );
+
+ // Valid key extraction
+ auto expected_value = *table_for_extract.cbegin();
+ auto key = Value<Table>::key( expected_value );
+ auto count = table_for_extract.count(key);
+
+ nh = table_for_extract.unsafe_extract(key);
+ ASSERT( !nh.empty(),
+ "Extract: After successful extraction by key node handle is empty" );
+ ASSERT( compare_handle_getters(std::move(nh), expected_value),
+ "Extract: After successful extraction by key node handle contains wrong value" );
+ ASSERT( table_for_extract.count(key) == count - 1,
+ "Extract: After successful node extraction by key, table still contains this key" );
+
+ // Valid iterator overload
+ auto expected_value2 = *table_for_extract.cbegin();
+ auto key2 = Value<Table>::key( expected_value2 );
+ auto count2 = table_for_extract.count(key2);
+
+ nh = table_for_extract.unsafe_extract(table_for_extract.cbegin());
+ ASSERT( !nh.empty(),
+ "Extract: After successful extraction by iterator node handle is empty" );
+ ASSERT( compare_handle_getters(std::move(nh), expected_value2),
+ "Extract: After successful extraction by iterator node handle contains wrong value" );
+ ASSERT( table_for_extract.count(key2) == count2 - 1,
+ "Extract: After successful extraction table also contains this element" );
+ }
+
+    // All tests except merge
+ template <typename Table>
+ void NodeHandlingTests ( const Table& table,
+ const typename Table::value_type& new_value) {
+ TestHandle(table);
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ TestInsert(table, new_value);
+#endif /*__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT*/
+ TestExtract(table, Value<Table>::key( new_value ));
+ }
+
+ template <typename TableType1, typename TableType2>
+ void TestMerge( TableType1 table1, TableType2&& table2 ) {
+ using Table2PureType = typename std::decay<TableType2>::type;
+ // Initialization
+ TableType1 table1_backup = table1;
+ // For copying lvalue
+ Table2PureType table2_backup = table2;
+
+ table1.merge(std::forward<TableType2>(table2));
+ for (auto it: table2) {
+ ASSERT( table1.find( Value<Table2PureType>::key( it ) ) != table1.end(),
+ "Merge: Some key(s) was not merged" );
+ }
+
+        // After the following loop table1 will contain only the elements merged from table2
+ for (auto it: table1_backup) {
+ table1.unsafe_extract(Value<TableType1>::key( it ));
+ }
+        // After the following loop table2_backup will contain only the elements merged from table2
+ for (auto it: table2) {
+ table2_backup.unsafe_extract(Value<Table2PureType>::key( it ));
+ }
+
+        ASSERT ( table1.size() == table2_backup.size(), "Merge: Sizes of the tables are not equal" );
+ for (auto it: table2_backup) {
+ ASSERT( table1.find( Value<Table2PureType>::key( it ) ) != table1.end(),
+ "Merge: Wrong merge behavior" );
+ }
+ }
+
+ // Testing of rvalue and lvalue overloads
+ template <typename TableType1, typename TableType2>
+ void TestMergeOverloads( const TableType1& table1, TableType2 table2 ) {
+ TableType2 table_backup(table2);
+ TestMerge(table1, table2);
+ TestMerge(table1, std::move(table_backup));
+ }
+
+ template <typename Table, typename MultiTable>
+ void TestMergeTransposition( Table table1, Table table2,
+ MultiTable multitable1, MultiTable multitable2 ) {
+ Table empty_map;
+ MultiTable empty_multimap;
+
+ // Map transpositions
+ node_handling::TestMergeOverloads(table1, table2);
+ node_handling::TestMergeOverloads(table1, empty_map);
+ node_handling::TestMergeOverloads(empty_map, table2);
+
+ // Multimap transpositions
+ node_handling::TestMergeOverloads(multitable1, multitable2);
+ node_handling::TestMergeOverloads(multitable1, empty_multimap);
+ node_handling::TestMergeOverloads(empty_multimap, multitable2);
+
+ // Map/Multimap transposition
+ node_handling::TestMergeOverloads(table1, multitable1);
+ node_handling::TestMergeOverloads(multitable2, table2);
+ }
+
+ template <typename Table>
+ void AssertionConcurrentMerge ( Table start_data, Table src_table, std::vector<Table> tables,
+ std::true_type) {
+ ASSERT( src_table.size() == start_data.size()*tables.size(),
+ "Merge: Incorrect merge for some elements" );
+
+ for(auto it: start_data) {
+ ASSERT( src_table.count( Value<Table>::key( it ) ) ==
+ start_data.count( Value<Table>::key( it ) )*tables.size(),
+ "Merge: Incorrect merge for some element" );
+ }
+
+ for (size_t i = 0; i < tables.size(); i++) {
+ ASSERT( tables[i].empty(), "Merge: Some elements was not merged" );
+ }
+ }
+
+ template <typename Table>
+ void AssertionConcurrentMerge ( Table start_data, Table src_table, std::vector<Table> tables,
+ std::false_type) {
+ Table expected_result;
+ for (auto table: tables)
+ for (auto it: start_data) {
+ // If we cannot find some element in some table, then it has been moved
+ if (table.find( Value<Table>::key( it ) ) == table.end()){
+ bool result = expected_result.insert( it ).second;
+ ASSERT( result, "Merge: Some element was merged twice or was not "
+ "returned to his owner after unsuccessful merge");
+ }
+ }
+
+ ASSERT( expected_result.size() == src_table.size() && start_data.size() == src_table.size(),
+ "Merge: wrong size of result table");
+ for (auto it: expected_result) {
+ if ( src_table.find( Value<Table>::key( it ) ) != src_table.end() &&
+ start_data.find( Value<Table>::key( it ) ) != start_data.end() ){
+ src_table.unsafe_extract(Value<Table>::key( it ));
+ start_data.unsafe_extract(Value<Table>::key( it ));
+ } else {
+ ASSERT( false, "Merge: Incorrect merge for some element" );
+ }
+ }
+
+ ASSERT( src_table.empty()&&start_data.empty(), "Merge: Some elements were not merged" );
+ }
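+
+    // Worked example: if start_data holds keys {1, 2} and three copies of it
+    // are merged concurrently into src_table, a multimap ends up with
+    // count(k) == 3 for each key (checked by the true_type overload above),
+    // while a map keeps one element per key and leaves each duplicate in its
+    // source table (checked by the false_type overload above).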
+
+ template <typename Table>
+ void TestConcurrentMerge (const Table& table_data) {
+ for (auto num_threads = MinThread + 1; num_threads <= MaxThread; num_threads++){
+ std::vector<Table> tables;
+ Table src_table;
+
+ for (auto j = 0; j < num_threads; j++){
+ tables.push_back(table_data);
+ }
+
+ NativeParallelFor( num_threads, [&](size_t index){ src_table.merge(tables[index]); } );
+
+ AssertionConcurrentMerge( table_data, src_table, tables,
+ std::integral_constant<bool,Table::allow_multimapping>{});
+ }
+ }
+
+
+ template <typename Table>
+ void TestNodeHandling(){
+ Table table;
+
+ for (int i = 1; i < 5; i++)
+ table.insert(Value<Table>::make(i));
+
+ if (Table::allow_multimapping)
+ table.insert(Value<Table>::make(4));
+
+ node_handling::NodeHandlingTests(table, Value<Table>::make(5));
+ }
+
+ template <typename TableType1, typename TableType2>
+ void TestMerge(int size){
+ TableType1 table1_1;
+ TableType1 table1_2;
+ int i = 1;
+ for (; i < 5; ++i) {
+ table1_1.insert(Value<TableType1>::make(i));
+ table1_2.insert(Value<TableType1>::make(i*i));
+ }
+ if (TableType1::allow_multimapping) {
+ table1_1.insert(Value<TableType1>::make(i));
+ table1_2.insert(Value<TableType1>::make(i*i));
+ }
+
+ TableType2 table2_1;
+ TableType2 table2_2;
+ for (i = 3; i < 7; ++i) {
+            table2_1.insert(Value<TableType2>::make(i));
+            table2_2.insert(Value<TableType2>::make(i*i));
+ }
+ if (TableType2::allow_multimapping) {
+ table2_1.insert(Value<TableType2>::make(i));
+ table2_2.insert(Value<TableType2>::make(i*i));
+ }
+
+ node_handling::TestMergeTransposition(table1_1, table1_2,
+ table2_1, table2_2);
+
+ TableType1 table1_3;
+ for (i = 0; i<size; ++i){
+ table1_3.insert(Value<TableType1>::make(i));
+ }
+ node_handling::TestConcurrentMerge(table1_3);
+
+ TableType2 table2_3;
+ for (i = 0; i<size; ++i){
+ table2_3.insert(Value<TableType2>::make(i));
+ }
+ node_handling::TestConcurrentMerge(table2_3);
+    }
+}
+#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT || __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && !__TBB_SCOPED_ALLOCATOR_BROKEN
#include <scoped_allocator>
-template<typename Allocator>
-class allocator_aware_data {
-public:
- static bool assert_on_constructions;
- typedef Allocator allocator_type;
-
- allocator_aware_data(const allocator_type& allocator = allocator_type())
- : my_allocator(allocator), my_value(0) {}
- allocator_aware_data(int v, const allocator_type& allocator = allocator_type())
- : my_allocator(allocator), my_value(v) {}
- allocator_aware_data(const allocator_aware_data&) {
- ASSERT(!assert_on_constructions, "Allocator should propogate to the data during copy construction");
- }
- allocator_aware_data(allocator_aware_data&&) {
- ASSERT(!assert_on_constructions, "Allocator should propogate to the data during move construction");
- }
- allocator_aware_data(const allocator_aware_data& rhs, const allocator_type& allocator)
- : my_allocator(allocator), my_value(rhs.my_value) {}
- allocator_aware_data(allocator_aware_data&& rhs, const allocator_type& allocator)
- : my_allocator(allocator), my_value(rhs.my_value) {}
-
- int value() const { return my_value; }
-private:
- allocator_type my_allocator;
- int my_value;
-};
-
struct custom_hash_compare {
template<typename Allocator>
    static size_t hash(const allocator_aware_data<Allocator>& key) {
        // Hash the wrapped int value.
        return tbb::tbb_hash_compare<int>::hash(key.value());
    }
};
-template<typename Allocator>
-bool allocator_aware_data<Allocator>::assert_on_constructions = false;
-
void TestScopedAllocator() {
typedef allocator_aware_data<std::scoped_allocator_adaptor<tbb::tbb_allocator<int>>> allocator_data_type;
typedef std::scoped_allocator_adaptor<tbb::tbb_allocator<allocator_data_type>> allocator_type;
}
#endif
-// C++03 allocator doesn't have to be assignable or swappable, so
-// tbb::internal::allocator_traits defines POCCA and POCS as false_type
#if __TBB_ALLOCATOR_TRAITS_PRESENT
-
-template<typename Allocator>
-void test_traits() {
- typedef int key_type;
-
- typedef int mapped_type;
- typedef tbb::tbb_hash_compare<key_type> compare_type;
-
- typedef typename Allocator::propagate_on_container_copy_assignment pocca;
- typedef typename Allocator::propagate_on_container_swap pocs;
-
- typedef tbb::concurrent_hash_map<key_type, mapped_type, compare_type, Allocator> container_type;
- bool propagated_on_copy_assign = false;
- bool propagated_on_move = false;
- bool propagated_on_swap = false;
- bool selected_on_copy_construct = false;
-
- Allocator alloc(propagated_on_copy_assign, propagated_on_move, propagated_on_swap, selected_on_copy_construct);
-
- container_type c1(alloc), c2(c1);
- ASSERT(selected_on_copy_construct, "select_on_container_copy_construction function was not called");
-
- c1 = c2;
- ASSERT(propagated_on_copy_assign == pocca::value, "Unexpected allocator propagation on copy assignment");
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
- typedef typename Allocator::propagate_on_container_move_assignment pocma;
- c2 = std::move(c1);
- ASSERT(propagated_on_move == pocma::value, "Unexpected allocator propagation on move assignment");
-#endif
-
- c1.swap(c2);
- ASSERT(propagated_on_swap == pocs::value, "Unexpected allocator propagation on swap");
-}
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-class non_movable_object {
- non_movable_object() {}
-private:
- non_movable_object(non_movable_object&&);
- non_movable_object& operator=(non_movable_object&&);
-};
-
-void test_non_movable_value_type() {
- // Check, that if pocma is true, concurrent_hash_map allows move assignment without per-element move
- typedef propagating_allocator<tbb::tbb_allocator<int>, /*POCMA=*/tbb::internal::traits_true_type> allocator_type;
- typedef tbb::concurrent_hash_map<int, non_movable_object, tbb::tbb_hash_compare<int>, allocator_type> container_type;
- allocator_type alloc;
- container_type container1(alloc), container2(alloc);
- container1 = std::move(container2);
-}
-
-#endif // __TBB_CPP11_RVALUE_REF_PRESENT
-
void TestAllocatorTraits() {
- typedef tbb::tbb_allocator<int> base_allocator;
- typedef tbb::internal::traits_true_type true_type;
- typedef tbb::internal::traits_true_type false_type;
-
- typedef propagating_allocator<base_allocator, /*POCMA=*/true_type, /*POCCA=*/true_type, /*POCS=*/true_type>
- always_propagating_allocator;
- typedef propagating_allocator<base_allocator, false_type, false_type, false_type> never_propagating_allocator;
- typedef propagating_allocator<base_allocator, true_type, false_type, false_type> pocma_allocator;
- typedef propagating_allocator<base_allocator, false_type, true_type, false_type> pocca_allocator;
- typedef propagating_allocator<base_allocator, false_type, false_type, true_type> pocs_allocator;
-
- test_traits<always_propagating_allocator>();
- test_traits<never_propagating_allocator>();
- test_traits<pocca_allocator>();
- test_traits<pocma_allocator>();
- test_traits<pocs_allocator>();
+ using namespace propagating_allocators;
+ typedef int key;
+ typedef int mapped;
+ typedef tbb::tbb_hash_compare<key> compare;
+
+ typedef tbb::concurrent_hash_map<key, mapped, compare, always_propagating_allocator> always_propagating_map;
+ typedef tbb::concurrent_hash_map<key, mapped, compare, never_propagating_allocator> never_propagating_map;
+ typedef tbb::concurrent_hash_map<key, mapped, compare, pocma_allocator> pocma_map;
+ typedef tbb::concurrent_hash_map<key, mapped, compare, pocca_allocator> pocca_map;
+ typedef tbb::concurrent_hash_map<key, mapped, compare, pocs_allocator> pocs_map;
+
+ test_allocator_traits_support<always_propagating_map>();
+ test_allocator_traits_support<never_propagating_map>();
+ test_allocator_traits_support<pocma_map>();
+ test_allocator_traits_support<pocca_map>();
+ test_allocator_traits_support<pocs_map>();
#if __TBB_CPP11_RVALUE_REF_PRESENT
- test_non_movable_value_type();
+ test_allocator_traits_with_non_movable_value_type<pocma_map>();
#endif
}
-
#endif // __TBB_ALLOCATOR_TRAITS_PRESENT
//------------------------------------------------------------------------
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#define __TBB_EXTRA_DEBUG 1
+#if _MSC_VER
+#define _SCL_SECURE_NO_WARNINGS
+#endif
+
+#include "tbb/tbb_config.h"
+#include "harness.h"
+#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+
+#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
+#include "tbb/concurrent_map.h"
+#if __TBB_INITIALIZER_LISTS_PRESENT
+// These operator== overloads are used implicitly in test_initializer_list.h.
+// For some unknown reason clang is not able to find them if they are declared after the
+// inclusion of test_initializer_list.h.
+template<typename container_type>
+bool equal_containers( container_type const& lhs, container_type const& rhs );
+template<typename Key, typename Value>
+bool operator==( tbb::concurrent_map<Key, Value> const& lhs, tbb::concurrent_map<Key, Value> const& rhs ) {
+ return equal_containers( lhs, rhs );
+}
+template<typename Key, typename Value>
+bool operator==( tbb::concurrent_multimap<Key, Value> const& lhs, tbb::concurrent_multimap<Key, Value> const& rhs ) {
+ return equal_containers( lhs, rhs );
+}
+#endif /* __TBB_INITIALIZER_LISTS_PRESENT */
+#include "test_concurrent_ordered_common.h"
+
+typedef tbb::concurrent_map<int, int, std::less<int>, MyAllocator> MyMap;
+typedef tbb::concurrent_map<int, int, std::greater<int>, MyAllocator> MyGreaterMap;
+typedef tbb::concurrent_map<int, check_type<int>, std::less<int>, MyAllocator> MyCheckedMap;
+typedef tbb::concurrent_map<intptr_t, FooWithAssign, std::less<intptr_t>, MyAllocator> MyCheckedStateMap;
+typedef tbb::concurrent_multimap<int, int, std::less<int>, MyAllocator> MyMultiMap;
+typedef tbb::concurrent_multimap<int, int, std::greater<int>, MyAllocator> MyGreaterMultiMap;
+typedef tbb::concurrent_multimap<int, check_type<int>, std::less<int>, MyAllocator> MyCheckedMultiMap;
+
+template <>
+struct SpecialTests <MyMap> {
+ static void Test( const char *str ) {
+ SpecialMapTests<MyMap>(str);
+ }
+};
+
+template <>
+struct SpecialTests <MyMultiMap> {
+ static void Test( const char *str ) {
+ SpecialMultiMapTests<MyMultiMap>(str);
+ }
+};
+
+struct co_map_type : ordered_move_traits_base {
+ template<typename element_type, typename allocator_type>
+ struct apply {
+ typedef tbb::concurrent_map<element_type, element_type, std::less<element_type>, allocator_type > type;
+ };
+
+ typedef FooPairIterator init_iterator_type;
+};
+
+struct co_multimap_type : ordered_move_traits_base {
+ template<typename element_type, typename allocator_type>
+ struct apply {
+ typedef tbb::concurrent_multimap<element_type, element_type, std::less<element_type>, allocator_type > type;
+ };
+
+ typedef FooPairIterator init_iterator_type;
+};
+
+template <bool defCtorPresent, typename Key, typename Element, typename Compare, typename Allocator>
+void TestMapSpecificMethods( tbb::concurrent_map<Key, Element, Compare, Allocator> &c,
+ const typename tbb::concurrent_map<Key, Element, Compare, Allocator>::value_type &value ) {
+ TestMapSpecificMethodsImpl<defCtorPresent>(c, value);
+}
+
+struct OrderedMapTypesTester{
+ template <bool defCtorPresent, typename ValueType>
+ void check( const std::list<ValueType> &lst ) {
+ typedef typename ValueType::first_type KeyType;
+ typedef typename ValueType::second_type ElemType;
+ TypeTester< defCtorPresent, tbb::concurrent_map< KeyType, ElemType>,
+ tbb::concurrent_map< KeyType, ElemType, std::less<KeyType>, debug_allocator<ValueType> > >( lst );
+ TypeTester< defCtorPresent, tbb::concurrent_multimap< KeyType, ElemType>,
+ tbb::concurrent_multimap< KeyType, ElemType, std::less<KeyType>, debug_allocator<ValueType> > >( lst );
+ }
+};
+
+void TestTypes() {
+ TestMapCommonTypes<OrderedMapTypesTester>();
+
+ #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT
+ // Regression test for a problem with excessive requirements of emplace()
+ test_emplace_insert<tbb::concurrent_map< int*, test::unique_ptr<int> >,
+ tbb::internal::false_type>( new int, new int );
+ test_emplace_insert<tbb::concurrent_multimap< int*, test::unique_ptr<int> >,
+ tbb::internal::false_type>( new int, new int );
+ #endif /*__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT*/
+}
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+template <template <typename...> typename TMap>
+void TestDeductionGuides() {
+ std::vector<std::pair<int, int>> v(10, {0, 0});
+ TMap map(v.begin(), v.end());
+ static_assert(std::is_same_v<decltype(map), TMap<int, int> >, "WRONG\n");
+
+ std::greater<int> compare;
+ std::allocator<int> allocator;
+ TMap map2(v.begin(), v.end(), compare);
+ static_assert(std::is_same_v<decltype(map2), TMap<int, int, decltype(compare)> >, "WRONG\n");
+
+ TMap map3(v.begin(), v.end(), allocator);
+ static_assert(std::is_same_v<decltype(map3), TMap<int, int, std::less<int>, decltype(allocator)> >, "WRONG\n");
+
+ TMap map4(v.begin(), v.end(), compare, allocator);
+ static_assert(std::is_same_v<decltype(map4), TMap<int, int, decltype(compare), decltype(allocator)> >, "WRONG\n");
+
+ using pair_t = std::pair<const int, int>;
+ auto init = { pair_t{1, 1}, pair_t{2, 2}, pair_t{3, 3} };
+ TMap map5(init);
+ static_assert(std::is_same_v<decltype(map5), TMap<int, int> >, "WRONG\n");
+
+ TMap map6(init, compare);
+ static_assert(std::is_same_v<decltype(map6), TMap<int, int, decltype(compare)> >, "WRONG\n");
+
+ TMap map7(init, allocator);
+ static_assert(std::is_same_v<decltype(map7), TMap<int, int, std::less<int>, decltype(allocator)> >, "WRONG\n");
+
+ TMap map8(init, compare, allocator);
+ static_assert(std::is_same_v<decltype(map8), TMap<int, int, decltype(compare), decltype(allocator)> >, "WRONG\n");
+}
+#endif
+
+void test_heterogeneous_lookup() {
+    tbb::concurrent_map<int, int, transparent_compare> map = {{1, 1}, {2, 2}};
+    tbb::concurrent_multimap<int, int, transparent_compare> mmap = {{1, 1}, {1, 10}, {2, 2}};
+    check_heterogeneous_lookup(map);
+    check_heterogeneous_lookup(mmap);
+}
+
+void multicontainer_specific_test() {
+ check_multicontainer_internal_order<tbb::concurrent_multimap<int, int> >();
+ check_multicontainer_internal_order<tbb::concurrent_multimap<int, int, std::greater<int> > >();
+}
+
+#if !__TBB_SCOPED_ALLOCATOR_BROKEN
+#include <scoped_allocator>
+
+template <template<typename...> class Map>
+void test_scoped_allocator() {
+ using allocator_data_type = allocator_aware_data<std::scoped_allocator_adaptor<tbb::tbb_allocator<int>>>;
+ using allocator_type = std::scoped_allocator_adaptor<tbb::tbb_allocator<allocator_data_type>>;
+ using map_type = Map<allocator_data_type, allocator_data_type, allocator_data_compare, allocator_type>;
+
+ allocator_type allocator;
+ allocator_data_type key1(1, allocator), key2(2, allocator);
+ allocator_data_type data1(1, allocator), data2(2, allocator);
+ map_type map1(allocator), map2(allocator);
+
+ typename map_type::value_type v1(key1, data1), v2(key2, data2);
+
+ auto init_list = { v1, v2 };
+
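+    // While this flag is set, any copy or move of allocator_aware_data that does
+    // not receive the scoped allocator triggers an assertion, so the operations
+    // below verify that the container performs uses-allocator construction.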
+ allocator_data_type::assert_on_constructions = true;
+ map1.emplace(key1, data1);
+ map2.emplace(key2, std::move(data2));
+
+ map1.clear();
+ map2.clear();
+
+ map1.insert(v1);
+ map2.insert(std::move(v2));
+
+ map1.clear();
+ map2.clear();
+
+ map1.insert(init_list);
+
+ map1.clear();
+ map2.clear();
+
+ map1 = map2;
+ map2 = std::move(map1);
+
+ map1.swap(map2);
+
+ allocator_data_type::assert_on_constructions = false;
+}
+#endif // !__TBB_SCOPED_ALLOCATOR_BROKEN
+
+int TestMain() {
+ test_machine();
+
+ test_basic<MyMap>( "concurrent Map" );
+ test_basic<MyGreaterMap>( "concurrent greater Map" );
+ test_concurrent<MyMap>( "concurrent Map" );
+ test_concurrent<MyGreaterMap>( "concurrent greater Map" );
+ test_basic<MyMultiMap>( "concurrent MultiMap" );
+ test_basic<MyGreaterMultiMap>( "concurrent greater MultiMap" );
+ test_concurrent<MyMultiMap>( "concurrent MultiMap" );
+ test_concurrent<MyGreaterMultiMap>( "concurrent greater MultiMap" );
+
+ { Check<MyCheckedMap::value_type> checkit; test_basic<MyCheckedMap>( "concurrent map (checked)" ); }
+ { Check<MyCheckedMap::value_type> checkit; test_concurrent<MyCheckedMap>( "concurrent map (checked)" ); }
+ test_basic<MyCheckedStateMap>("concurrent map (checked state of elements)", tbb::internal::true_type());
+ test_concurrent<MyCheckedStateMap>("concurrent map (checked state of elements)");
+
+ { Check<MyCheckedMultiMap::value_type> checkit; test_basic<MyCheckedMultiMap>( "concurrent MultiMap (checked)" ); }
+ { Check<MyCheckedMultiMap::value_type> checkit; test_concurrent<MyCheckedMultiMap>( "concurrent MultiMap (checked)" ); }
+
+ multicontainer_specific_test();
+
+ TestInitList< tbb::concurrent_map<int, int>,
+ tbb::concurrent_multimap<int, int> >( {{1,1},{2,2},{3,3},{4,4},{5,5}} );
+
+#if __TBB_RANGE_BASED_FOR_PRESENT
+ TestRangeBasedFor<MyMap>();
+ TestRangeBasedFor<MyMultiMap>();
+#endif
+
+ test_rvalue_ref_support<co_map_type>( "concurrent map" );
+ test_rvalue_ref_support<co_multimap_type>( "concurrent multimap" );
+
+ TestTypes();
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+ TestDeductionGuides<tbb::concurrent_map>();
+ TestDeductionGuides<tbb::concurrent_multimap>();
+#endif /*__TBB_CPP17_DEDUCTION_GUIDES_PRESENT*/
+
+ node_handling::TestNodeHandling<MyMap>();
+ node_handling::TestNodeHandling<MyMultiMap>();
+ node_handling::TestMerge<MyMap, MyMultiMap>(1000);
+
+    test_heterogeneous_lookup();
+
+ test_allocator_traits<tbb::concurrent_map, int, int, std::less<int>>();
+ test_allocator_traits<tbb::concurrent_multimap, int, int, std::less<int>>();
+
+#if !__TBB_SCOPED_ALLOCATOR_BROKEN
+ test_scoped_allocator<tbb::concurrent_map>();
+ test_scoped_allocator<tbb::concurrent_multimap>();
+#endif
+
+ return Harness::Done;
+}
+#else
+int TestMain() {
+ return Harness::Skipped;
+}
+#endif
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "test_concurrent_associative_common.h"
+
+// The allocation count of an empty ordered container is now checked against a manually calculated upper bound.
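+// (The preview ordered containers are built on a concurrent skip list, so the
+// dummy head allocation varies with the level count, hence a bound rather than
+// an exact count.)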
+const size_t dummy_head_max_size = 584;
+
+template<typename MyTable>
+inline void CheckEmptyContainerAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact, int line) {
+ typename MyTable::allocator_type a = table.get_allocator();
+ REMARK("#%d checking allocators: items %u/%u, allocs %u/%u\n", line,
+ unsigned(a.items_allocated), unsigned(a.items_freed), unsigned(a.allocations), unsigned(a.frees) );
+ CheckAllocator<MyTable>(a, expected_allocs, expected_frees, exact);
+ ASSERT( a.items_allocated <= a.items_freed + dummy_head_max_size, NULL);
+}
+
+template <typename Table>
+struct order_checker {
+ typename Table::value_compare& val_comp;
+ typename Table::key_compare& key_comp;
+
+    order_checker(typename Table::value_compare& _val_c, typename Table::key_compare& _key_c): val_comp(_val_c), key_comp(_key_c){}
+
+ bool operator()(const typename Table::value_type& lhs, const typename Table::value_type& rhs){
+ if (Table::allow_multimapping)
+            // Multicontainers allow equivalent keys, so adjacent elements may compare equal; use a "not greater" check instead of strict "less"
+ return !val_comp(rhs, lhs) && !key_comp(Value<Table>::key(rhs), Value<Table>::key(lhs));
+ return val_comp(lhs,rhs) && key_comp(Value<Table>::key(lhs),Value<Table>::key(rhs));
+ }
+};
+
+template< typename Table>
+void check_container_order(const Table& cont) {
+ if (!cont.empty()){
+ typename Table::key_compare key_comp = cont.key_comp();
+ typename Table::value_compare value_comp = cont.value_comp();
+ order_checker<Table> check_order(value_comp, key_comp);
+
+ for (auto it = cont.begin(); std::next(it)!=cont.end();){
+ auto pr_it = it++;
+ ASSERT(check_order(*pr_it, *it),"The order of the elements is broken");
+ }
+ }
+}
+
+template <typename T>
+void test_ordered_methods() {
+ T cont;
+
+ int r, random_threshold = 10, uncontained_key = random_threshold / 2;
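+    // One key in the middle of the range is deliberately never inserted, so the
+    // bound and equal_range checks below also cover an absent key.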
+ for (int i = 0; i < 100; i++) {
+ r = std::rand() % random_threshold;
+ if ( r != uncontained_key) {
+ cont.insert(Value<T>::make(r));
+ }
+ }
+
+ check_container_order(cont);
+
+ typename T::value_compare val_comp = cont.value_comp();
+ typename T::iterator l_bound_check, u_bound_check;
+ for (int key = -1; key < random_threshold + 1; key++) {
+
+ auto eq_range = cont.equal_range(key);
+ // Check equal_range() content
+ for (auto it = eq_range.first; it != eq_range.second; it++)
+            ASSERT(*it == Value<T>::make(key), "equal_range() contains a wrong value");
+
+ // Manual search of upper and lower bounds
+ l_bound_check = cont.end();
+ u_bound_check = cont.end();
+ for (auto it = cont.begin() ; it != cont.end(); it++){
+ if (!val_comp(*it, Value<T>::make(key)) && l_bound_check == cont.end()){
+ l_bound_check = it;
+ }
+ if (val_comp(Value<T>::make(key),*it) && u_bound_check == cont.end()){
+ u_bound_check = it;
+ break;
+ }
+ }
+
+ typename T::iterator l_bound = cont.lower_bound(key);
+ typename T::iterator u_bound = cont.upper_bound(key);
+
+        ASSERT(l_bound == l_bound_check, "lower_bound() returned a wrong iterator");
+        ASSERT(u_bound == u_bound_check, "upper_bound() returned a wrong iterator");
+
+ ASSERT(l_bound == eq_range.first && u_bound == eq_range.second, NULL);
+ }
+}
+
+template<typename T, typename do_check_element_state>
+void test_basic(const char * str, do_check_element_state)
+{
+ test_basic_common<T>(str, do_check_element_state());
+ test_ordered_methods<T>();
+}
+
+template<typename T>
+void test_basic(const char * str){
+ test_basic_common<T>(str);
+ test_ordered_methods<T>();
+}
+
+template<typename T>
+void test_concurrent_order() {
+ for (auto num_threads = MinThread + 1; num_threads <= MaxThread; num_threads++) {
+ T cont;
+ int items = 1000;
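+        // Threads insert with different strides and in different directions to
+        // stress concurrent ordered insertion; the order is validated afterwards.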
+ NativeParallelFor( num_threads, [&](size_t index){
+ int step = index % 4 + 1;
+ bool reverse = (step % 2 == 0);
+ if (reverse) {
+ for (int i = 0; i < items; i+=step){
+ cont.insert(Value<T>::make(i));
+ }
+ } else {
+ for (int i = items; i > 0; i-=step){
+ cont.insert(Value<T>::make(i));
+ }
+ }
+ } );
+
+ check_container_order(cont);
+ }
+}
+
+template<typename T>
+void test_concurrent(const char *tablename, bool asymptotic = false) {
+ test_concurrent_common<T>(tablename, asymptotic);
+ test_concurrent_order<T>();
+}
+
+// If inserted elements compare equivalent, they must appear in the container
+// in insertion order, from the first inserted to the last.
+template<typename T>
+void check_multicontainer_internal_order(){
+ T cont;
+ for (int counter = 0; counter < 10; counter++){
+ cont.emplace(1, counter);
+ }
+
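+    // Adjacent equivalent elements must carry strictly increasing payloads,
+    // i.e. FIFO order among keys that compare equal.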
+ for ( auto it = cont.begin(); std::next(it) != cont.end();){
+ auto it_pr = it++;
+        ASSERT(it_pr->second < it->second, "Internal order of the multicontainer is broken");
+ }
+}
+
+struct ordered_move_traits_base {
+ enum{ expected_number_of_items_to_allocate_for_steal_move = dummy_head_max_size };
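+    // Presumably a "stealing" move leaves the source with a freshly allocated
+    // dummy head, so at most dummy_head_max_size items are allocated.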
+
+ template <typename ordered_type, typename iterator_type>
+ static ordered_type& construct_container(tbb::aligned_space<ordered_type> & storage, iterator_type begin, iterator_type end){
+ new (storage.begin()) ordered_type(begin, end);
+ return * storage.begin();
+ }
+
+ template <typename ordered_type, typename iterator_type, typename allocator_type>
+ static ordered_type& construct_container(tbb::aligned_space<ordered_type> & storage, iterator_type begin, iterator_type end, allocator_type const& a ){
+ new (storage.begin()) ordered_type(begin, end, typename ordered_type::key_compare(), a);
+ return * storage.begin();
+ }
+
+ template<typename ordered_type, typename iterator>
+ static bool equal(ordered_type const& c, iterator begin, iterator end){
+ bool equal_sizes = ( static_cast<size_t>(std::distance(begin, end)) == c.size() );
+ if (!equal_sizes)
+ return false;
+ for (iterator it = begin; it != end; ++it ){
+ if (c.find( Value<ordered_type>::key(*it)) == c.end()){
+ return false;
+ }
+ }
+ return true;
+ }
+};
+
+namespace std {
+ template<> struct less< std::weak_ptr<int> > {
+ public:
+        bool operator()( const std::weak_ptr<int>& lhs, const std::weak_ptr<int>& rhs ) const { return *lhs.lock() < *rhs.lock(); }
+ };
+ template<> struct less< const std::weak_ptr<int> > {
+ public:
+        bool operator()( const std::weak_ptr<int>& lhs, const std::weak_ptr<int>& rhs ) const { return *lhs.lock() < *rhs.lock(); }
+ };
+}
+
+template <bool defCtorPresent, typename Table>
+void CustomExamine( Table, const std::list<typename Table::value_type>) {
+ /*order check - see unordered example*/
+}
+
+template <bool defCtorPresent, typename Table>
+void Examine( Table c, const std::list<typename Table::value_type> &lst) {
+ CommonExamine<defCtorPresent>(c, lst);
+ CustomExamine<defCtorPresent>(c, lst);
+}
+
+template <bool defCtorPresent, typename Table, typename TableDebugAlloc>
+void TypeTester( const std::list<typename Table::value_type> &lst ) {
+ ASSERT( lst.size() >= 5, "Array should have at least 5 elements" );
+    ASSERT( lst.size() <= 100, "The test has O(n^2) complexity, so a big number of elements can lead to long execution time" );
+ // Construct an empty table.
+ Table c1;
+ c1.insert( lst.begin(), lst.end() );
+ Examine<defCtorPresent>( c1, lst );
+
+ typename Table::key_compare compare;
+
+ typename Table::allocator_type allocator;
+#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
+ // Constructor from an initializer_list.
+ typename std::list<typename Table::value_type>::const_iterator it = lst.begin();
+ Table c2( { *it++, *it++, *it++ } );
+ c2.insert( it, lst.end( ) );
+ Examine<defCtorPresent>( c2, lst );
+
+ it = lst.begin();
+ // Constructor from an initializer_list, default comparator and non-default allocator
+ Table c2_alloc( { *it++, *it++, *it++ }, allocator);
+ c2_alloc.insert( it, lst.end() );
+ Examine<defCtorPresent>( c2_alloc, lst );
+
+ it = lst.begin();
+ // Constructor from an initializer_list, non-default comparator and allocator
+ Table c2_comp_alloc( { *it++, *it++, *it++ }, compare, allocator );
+ c2_comp_alloc.insert( it, lst.end() );
+ Examine<defCtorPresent>( c2_comp_alloc, lst );
+#endif
+ // Copying constructor.
+ Table c3( c1 );
+ Examine<defCtorPresent>( c3, lst );
+ // Construct with non-default allocator
+ TableDebugAlloc c4;
+ c4.insert( lst.begin(), lst.end() );
+ Examine<defCtorPresent>( c4, lst );
+ // Copying constructor for a container with a different allocator type.
+ TableDebugAlloc c5( c4 );
+ Examine<defCtorPresent>( c5, lst );
+
+    // Construct an empty table with a non-default comparator
+ Table c6( compare );
+ c6.insert( lst.begin(), lst.end() );
+ Examine<defCtorPresent>( c6, lst );
+
+    // Construct an empty table with a non-default allocator
+ Table c6_alloc( allocator );
+ c6_alloc.insert( lst.begin(), lst.end() );
+ Examine<defCtorPresent>( c6_alloc, lst );
+
+    // Construct an empty table with a non-default comparator and allocator
+    Table c6_comp_alloc( compare, allocator );
+    c6_comp_alloc.insert( lst.begin(), lst.end() );
+    Examine<defCtorPresent>( c6_comp_alloc, lst );
+
+    // Construct an empty table of the debug-allocator type with a non-default comparator
+ TableDebugAlloc c7( compare );
+ c7.insert( lst.begin(), lst.end() );
+ Examine<defCtorPresent>( c7, lst );
+
+    // Construction with a copying iteration range.
+ Table c8( c1.begin(), c1.end() );
+ Examine<defCtorPresent>( c8, lst );
+
+ // Construction with a copying iteration range, default compare and non-default allocator
+ Table c8_alloc( c1.begin(), c1.end(), allocator );
+ Examine<defCtorPresent>( c8_alloc, lst );
+
+ // Construction with a copying iteration range, non-default compare and allocator
+ Table c8_comp_alloc( c1.begin(), c1.end(), compare, allocator );
+ Examine<defCtorPresent>( c8_comp_alloc, lst);
+
+ // Construction with an instance of non-default allocator
+ typename TableDebugAlloc::allocator_type a;
+ TableDebugAlloc c9( a );
+ c9.insert( c7.begin(), c7.end() );
+ Examine<defCtorPresent>( c9, lst );
+}
+
+struct int_key {
+ int_key(int i) : my_item(i) {}
+ int my_item;
+};
+
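+// A transparent comparator under which all keys compare equivalent; it lets the
+// lookup tests exercise the heterogeneous (is_transparent) overloads with
+// int_key, which is not convertible to the containers' int key_type.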
+struct transparent_compare {
+ template <typename K, typename K2>
+ bool operator()(const K&, const K2&) const {
+ return false;
+ }
+
+ using is_transparent = void;
+};
+
+template <typename Container>
+void check_heterogeneous_lookup(const Container& c) {
+    static_assert(std::is_same<typename Container::key_type, int>::value,
+                  "incorrect key_type for heterogeneous lookup test");
+ int_key k(1);
+ int key = 1;
+
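+    // Each heterogeneous call (with int_key) must agree with the homogeneous
+    // call (with int); these calls compile only via the is_transparent overloads.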
+    ASSERT(c.find(k) == c.find(key), "Incorrect heterogeneous find return value");
+    ASSERT(c.lower_bound(k) == c.lower_bound(key), "Incorrect heterogeneous lower_bound return value");
+    ASSERT(c.upper_bound(k) == c.upper_bound(key), "Incorrect heterogeneous upper_bound return value");
+    ASSERT(c.equal_range(k) == c.equal_range(key), "Incorrect heterogeneous equal_range return value");
+    ASSERT(c.count(k) == c.count(key), "Incorrect heterogeneous count return value");
+    ASSERT(c.contains(k) == c.contains(key), "Incorrect heterogeneous contains return value");
+}
+
+template <template<typename...> class ContainerType, typename... ContainerArgs>
+void test_allocator_traits() {
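+    // Instantiate the container with each propagation flavor (POCCA, POCMA,
+    // POCS) and check that assignment and swap propagate the allocator exactly
+    // as the traits prescribe.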
+ using namespace propagating_allocators;
+ using always_propagating_container = ContainerType<ContainerArgs..., always_propagating_allocator>;
+ using never_propagating_container = ContainerType<ContainerArgs..., never_propagating_allocator>;
+ using pocma_container = ContainerType<ContainerArgs..., pocma_allocator>;
+ using pocca_container = ContainerType<ContainerArgs..., pocca_allocator>;
+ using pocs_container = ContainerType<ContainerArgs..., pocs_allocator>;
+
+ test_allocator_traits_support<always_propagating_container>();
+ test_allocator_traits_support<never_propagating_container>();
+ test_allocator_traits_support<pocma_container>();
+ test_allocator_traits_support<pocca_container>();
+ test_allocator_traits_support<pocs_container>();
+
+ test_allocator_traits_with_non_movable_value_type<pocma_container>();
+}
+
+// Comparator for scoped_allocator tests
+struct allocator_data_compare {
+ template <typename A>
+ bool operator()(const allocator_aware_data<A>& d1, const allocator_aware_data<A>& d2) const {
+ return d1.value() < d2.value();
+ }
+};
};
}
//TODO: make CPQ more testable instead of hacking ad-hoc operator ==
-//operator == is required for __TBB_TEST_INIT_LIST_SUITE
template <typename element_type, typename compare_t, typename allocator_t>
bool operator==(tbb::concurrent_priority_queue<element_type, compare_t, allocator_t> const& lhs, tbb::concurrent_priority_queue<element_type, compare_t, allocator_t> const& rhs){
using equality_comparison_helpers::to_vector;
--- /dev/null
+/*
+ Copyright (c) 2019 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#define __TBB_EXTRA_DEBUG 1
+#if _MSC_VER
+#define _SCL_SECURE_NO_WARNINGS
+#endif
+
+#include "tbb/tbb_config.h"
+#include "harness.h"
+#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+
+#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
+#include "tbb/concurrent_set.h"
+#if __TBB_INITIALIZER_LISTS_PRESENT
+// These operator== overloads are used implicitly in test_initializer_list.h.
+// For some unknown reason clang is not able to find them if they are declared after the
+// inclusion of test_initializer_list.h.
+template<typename container_type>
+bool equal_containers( container_type const& lhs, container_type const& rhs );
+template<typename T>
+bool operator==(tbb::concurrent_set<T> const& lhs, tbb::concurrent_set<T> const& rhs) {
+ return equal_containers( lhs, rhs );
+}
+
+template<typename T>
+bool operator==(tbb::concurrent_multiset<T> const& lhs, tbb::concurrent_multiset<T> const& rhs) {
+ return equal_containers( lhs, rhs );
+}
+#endif /* __TBB_INITIALIZER_LISTS_PRESENT */
+#include "test_concurrent_ordered_common.h"
+
+typedef tbb::concurrent_set<int, std::less<int>, MyAllocator> MySet;
+typedef tbb::concurrent_set<int, std::greater<int>, MyAllocator> MyGreaterSet;
+typedef tbb::concurrent_set<check_type<int>, std::less<int>, MyAllocator> MyCheckedSet;
+typedef tbb::concurrent_set<FooWithAssign, std::less<Foo>, MyAllocator> MyCheckedStateSet;
+typedef tbb::concurrent_multiset<int, std::less<int>, MyAllocator> MyMultiSet;
+typedef tbb::concurrent_multiset<int, std::greater<int>, MyAllocator> MyGreaterMultiSet;
+typedef tbb::concurrent_multiset<check_type<int>, std::less<int>, MyAllocator> MyCheckedMultiSet;
+
+struct co_set_type : ordered_move_traits_base {
+ template<typename element_type, typename allocator_type>
+ struct apply {
+ typedef tbb::concurrent_set<element_type, std::less<element_type>, allocator_type > type;
+ };
+
+ typedef FooIterator init_iterator_type;
+};
+
+struct co_multiset_type : ordered_move_traits_base {
+ template<typename element_type, typename allocator_type>
+ struct apply {
+ typedef tbb::concurrent_multiset<element_type, std::less<element_type>, allocator_type > type;
+ };
+
+ typedef FooIterator init_iterator_type;
+};
+
+struct OrderedSetTypesTester{
+ template <bool defCtorPresent, typename ValueType>
+ void check( const std::list<ValueType> &lst ) {
+ TypeTester< defCtorPresent, tbb::concurrent_set< ValueType >,
+ tbb::concurrent_set< ValueType , std::less<ValueType>, debug_allocator<ValueType> > >( lst );
+ TypeTester< defCtorPresent, tbb::concurrent_multiset< ValueType >,
+ tbb::concurrent_multiset< ValueType , std::less<ValueType>, debug_allocator<ValueType> > >( lst );
+ }
+};
+
+void TestTypes() {
+ TestSetCommonTypes<OrderedSetTypesTester>();
+
+ #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT
+ // Regression test for a problem with excessive requirements of emplace()
+ test_emplace_insert<tbb::concurrent_set< test::unique_ptr<int> >,
+ tbb::internal::false_type>( new int, new int );
+ test_emplace_insert<tbb::concurrent_multiset< test::unique_ptr<int> >,
+ tbb::internal::false_type>( new int, new int );
+ #endif /*__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT*/
+}
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+template <template <typename ...> typename TSet>
+void TestDeductionGuides() {
+ std::vector<int> vc({1, 2, 3});
+ TSet set(vc.begin(), vc.end());
+ static_assert(std::is_same_v<decltype(set), TSet<int>>, "Wrong");
+
+ std::greater<int> compare;
+ std::allocator<int> allocator;
+
+ TSet set2(vc.begin(), vc.end(), compare);
+ static_assert(std::is_same_v<decltype(set2), TSet<int, decltype(compare)>>, "Wrong");
+
+ TSet set3(vc.begin(), vc.end(), allocator);
+ static_assert(std::is_same_v<decltype(set3), TSet<int, std::less<int>, decltype(allocator)>>, "Wrong");
+
+ TSet set4(vc.begin(), vc.end(), compare, allocator);
+ static_assert(std::is_same_v<decltype(set4), TSet<int, decltype(compare), decltype(allocator)>>, "Wrong");
+
+ auto init_list = { int(1), int(2), int(3) };
+ TSet set5(init_list);
+ static_assert(std::is_same_v<decltype(set5), TSet<int>>, "Wrong");
+
+ TSet set6(init_list, compare);
+ static_assert(std::is_same_v<decltype(set6), TSet<int, decltype(compare)>>, "Wrong");
+
+ TSet set7(init_list, allocator);
+ static_assert(std::is_same_v<decltype(set7), TSet<int, std::less<int>, decltype(allocator)>>, "Wrong");
+
+ TSet set8(init_list, compare, allocator);
+ static_assert(std::is_same_v<decltype(set8), TSet<int, decltype(compare), decltype(allocator)>>, "Wrong");
+}
+#endif /*__TBB_CPP17_DEDUCTION_GUIDES_PRESENT*/
+
+void test_heterogeneous_lookup() {
+    tbb::concurrent_set<int, transparent_compare> set = {1, 2, 3};
+    tbb::concurrent_multiset<int, transparent_compare> mset = {1, 1, 2, 3};
+    check_heterogeneous_lookup(set);
+    check_heterogeneous_lookup(mset);
+}
+
+struct compare_keys_less {
+ bool operator() (const std::pair<int, int>& lhs, const std::pair<int, int>& rhs) const {
+ return std::less<int>()(lhs.first, rhs.first);
+ }
+};
+
+struct compare_keys_greater {
+ bool operator() (const std::pair<int, int>& lhs, const std::pair<int, int>& rhs) const {
+ return std::greater<int>()(lhs.first, rhs.first);
+ }
+};
+
+void multicontainer_specific_test() {
+ check_multicontainer_internal_order<tbb::concurrent_multiset<std::pair<int, int>, compare_keys_less > >();
+ check_multicontainer_internal_order<tbb::concurrent_multiset<std::pair<int, int>, compare_keys_greater > >();
+}
+
+#if !__TBB_SCOPED_ALLOCATOR_BROKEN
+#include <scoped_allocator>
+
+template <template<typename...> class Set>
+void test_scoped_allocator() {
+ using allocator_data_type = allocator_aware_data<std::scoped_allocator_adaptor<tbb::tbb_allocator<int>>>;
+ using allocator_type = std::scoped_allocator_adaptor<tbb::tbb_allocator<allocator_data_type>>;
+ using set_type = Set<allocator_data_type, allocator_data_compare, allocator_type>;
+
+ allocator_type allocator;
+ allocator_data_type v1(1, allocator), v2(2, allocator);
+ set_type set1(allocator), set2(allocator);
+
+ auto init_list = { v1, v2 };
+
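+    // With the flag set, element copies and moves that bypass the scoped
+    // allocator assert, verifying uses-allocator construction (as in the map test).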
+ allocator_data_type::assert_on_constructions = true;
+ set1.emplace(v1);
+ set2.emplace(std::move(v1));
+
+ set1.clear();
+ set2.clear();
+
+ set1.insert(v1);
+ set2.insert(std::move(v1));
+
+ set1.clear();
+ set2.clear();
+
+ set1.insert(init_list);
+
+ set1.clear();
+ set2.clear();
+
+ set1 = set2;
+ set2 = std::move(set1);
+
+ set1.swap(set2);
+
+ allocator_data_type::assert_on_constructions = false;
+}
+
+#endif // !__TBB_SCOPED_ALLOCATOR_BROKEN
+
+int TestMain() {
+ test_machine();
+
+ test_basic<MySet>( "concurrent Set" );
+ test_basic<MyGreaterSet>( "concurrent greater Set" );
+ test_concurrent<MySet>( "concurrent Set" );
+ test_concurrent<MyGreaterSet>( "concurrent greater Set" );
+ test_basic<MyMultiSet>( "concurrent MultiSet" );
+ test_basic<MyGreaterMultiSet>( "concurrent greater MultiSet" );
+ test_concurrent<MyMultiSet>( "concurrent MultiSet" );
+ test_concurrent<MyGreaterMultiSet>( "concurrent greater MultiSet" );
+
+ { Check<MyCheckedSet::value_type> checkit; test_basic<MyCheckedSet>( "concurrent set (checked)" ); }
+ { Check<MyCheckedSet::value_type> checkit; test_concurrent<MyCheckedSet>( "concurrent set (checked)" ); }
+ test_basic<MyCheckedStateSet>("concurrent set (checked state of elements)", tbb::internal::true_type());
+ test_concurrent<MyCheckedStateSet>("concurrent set (checked state of elements)");
+
+ { Check<MyCheckedMultiSet::value_type> checkit; test_basic<MyCheckedMultiSet>( "concurrent MultiSet (checked)" ); }
+ { Check<MyCheckedMultiSet::value_type> checkit; test_concurrent<MyCheckedMultiSet>( "concurrent MultiSet (checked)" ); }
+
+ multicontainer_specific_test();
+
+ TestInitList< tbb::concurrent_set<int>,
+ tbb::concurrent_multiset<int> >( {1,2,3,4,5} );
+
+#if __TBB_RANGE_BASED_FOR_PRESENT
+ TestRangeBasedFor<MySet>();
+ TestRangeBasedFor<MyMultiSet>();
+#endif
+
+    test_rvalue_ref_support<co_set_type>( "concurrent set" );
+    test_rvalue_ref_support<co_multiset_type>( "concurrent multiset" );
+
+ TestTypes();
+
+#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
+ TestDeductionGuides<tbb::concurrent_set>();
+ TestDeductionGuides<tbb::concurrent_multiset>();
+#endif
+
+ node_handling::TestNodeHandling<MySet>();
+ node_handling::TestNodeHandling<MyMultiSet>();
+ node_handling::TestMerge<MySet, MyMultiSet>(1000);
+
+    test_heterogeneous_lookup();
+
+ test_allocator_traits<tbb::concurrent_set, int, std::less<int>>();
+ test_allocator_traits<tbb::concurrent_multiset, int, std::less<int>>();
+
+#if !__TBB_SCOPED_ALLOCATOR_BROKEN
+ test_scoped_allocator<tbb::concurrent_set>();
+ test_scoped_allocator<tbb::concurrent_multiset>();
+#endif
+
+ return Harness::Done;
+}
+#else
+int TestMain() {
+ return Harness::Skipped;
+}
+#endif
limitations under the License.
*/
-/* Some tests in this source file are based on PPL tests provided by Microsoft. */
-#include "tbb/parallel_for.h"
-#include "tbb/tick_count.h"
-#include "harness.h"
-#include "test_container_move_support.h"
-// Test that unordered containers do not require keys have default constructors.
-#define __HARNESS_CHECKTYPE_DEFAULT_CTOR 0
-#include "harness_checktype.h"
-#undef __HARNESS_CHECKTYPE_DEFAULT_CTOR
-#include "harness_allocator.h"
+#define __TBB_UNORDERED_TEST 1
-template<typename T>
-struct degenerate_hash {
- size_t operator()(const T& /*a*/) const {
- return 1;
- }
-};
+#include "test_concurrent_associative_common.h"
-// TestInitListSupportWithoutAssign with an empty initializer list causes internal error in Intel Compiler.
-#define __TBB_ICC_EMPTY_INIT_LIST_TESTS_BROKEN (__INTEL_COMPILER && __INTEL_COMPILER <= 1500)
-
-typedef local_counting_allocator<debug_allocator<std::pair<const int,int>,std::allocator> > MyAllocator;
-
-#define CheckAllocatorE(t,a,f) CheckAllocator(t,a,f,true,__LINE__)
-#define CheckAllocatorA(t,a,f) CheckAllocator(t,a,f,false,__LINE__)
template<typename MyTable>
-inline void CheckAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact = true, int line = 0) {
+inline void CheckEmptyContainerAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact, int line) {
typename MyTable::allocator_type a = table.get_allocator();
REMARK("#%d checking allocators: items %u/%u, allocs %u/%u\n", line,
unsigned(a.items_allocated), unsigned(a.items_freed), unsigned(a.allocations), unsigned(a.frees) );
ASSERT( a.items_allocated == a.allocations, NULL); ASSERT( a.items_freed == a.frees, NULL);
- if(exact) {
- ASSERT( a.allocations == expected_allocs, NULL); ASSERT( a.frees == expected_frees, NULL);
- } else {
- ASSERT( a.allocations >= expected_allocs, NULL); ASSERT( a.frees >= expected_frees, NULL);
- ASSERT( a.allocations - a.frees == expected_allocs - expected_frees, NULL );
- }
+ ASSERT( a.items_allocated == a.items_freed + 1, NULL);
+ CheckAllocator<MyTable>(a, expected_allocs, expected_frees, exact);
}
template<typename T>
-struct strip_const { typedef T type; };
-
-template<typename T>
-struct strip_const<const T> { typedef T type; };
-
-// value generator for cumap
-template <typename K, typename V = std::pair<const K, K> >
-struct ValueFactory {
- typedef typename strip_const<K>::type Kstrip;
- static V make(const K &value) { return V(value, value); }
- static Kstrip key(const V &value) { return value.first; }
- static Kstrip get(const V &value) { return (Kstrip)value.second; }
- template< typename U >
- static U convert(const V &value) { return U(value.second); }
-};
-
-// generator for cuset
-template <typename T>
-struct ValueFactory<T, T> {
- static T make(const T &value) { return value; }
- static T key(const T &value) { return value; }
- static T get(const T &value) { return value; }
- template< typename U >
- static U convert(const T &value) { return U(value); }
-};
-
-template <typename T>
-struct Value : ValueFactory<typename T::key_type, typename T::value_type> {
- template<typename U>
- static bool compare( const typename T::iterator& it, U val ) {
- return (Value::template convert<U>(*it) == val);
+struct degenerate_hash {
+ size_t operator()(const T& /*a*/) const {
+ return 1;
}
};
-#if _MSC_VER
-#pragma warning(disable: 4189) // warning 4189 -- local variable is initialized but not referenced
-#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it
-#endif
-
-template<typename ContainerType, typename Iterator, typename RangeType>
-std::pair<intptr_t,intptr_t> CheckRecursiveRange(RangeType range) {
- std::pair<intptr_t,intptr_t> sum(0, 0); // count, sum
- for( Iterator i = range.begin(), e = range.end(); i != e; ++i ) {
- ++sum.first; sum.second += Value<ContainerType>::get(*i);
- }
- if( range.is_divisible() ) {
- RangeType range2( range, tbb::split() );
- std::pair<intptr_t,intptr_t> sum1 = CheckRecursiveRange<ContainerType,Iterator, RangeType>( range );
- std::pair<intptr_t,intptr_t> sum2 = CheckRecursiveRange<ContainerType,Iterator, RangeType>( range2 );
- sum1.first += sum2.first; sum1.second += sum2.second;
- ASSERT( sum == sum1, "Mismatched ranges after division");
- }
- return sum;
-}
-
template <typename T>
-struct SpecialTests {
- static void Test(const char *str) {REMARK("skipped -- specialized %s tests\n", str);}
-};
-
-#if __TBB_INITIALIZER_LISTS_PRESENT
-template<typename container_type>
-bool equal_containers( container_type const& lhs, container_type const& rhs ) {
- if ( lhs.size() != rhs.size() ) {
- return false;
- }
- return std::equal( lhs.begin(), lhs.end(), rhs.begin(), Harness::IsEqual() );
-}
-
-#include "test_initializer_list.h"
-
-template <typename Table, typename MultiTable>
-void TestInitList( std::initializer_list<typename Table::value_type> il ) {
- using namespace initializer_list_support_tests;
- REMARK("testing initializer_list methods \n");
-
- TestInitListSupportWithoutAssign<Table,test_special_insert>(il);
- TestInitListSupportWithoutAssign<MultiTable, test_special_insert>( il );
-
-#if __TBB_ICC_EMPTY_INIT_LIST_TESTS_BROKEN
- REPORT( "Known issue: TestInitListSupportWithoutAssign with an empty initializer list is skipped.\n");
-#else
- TestInitListSupportWithoutAssign<Table, test_special_insert>( {} );
- TestInitListSupportWithoutAssign<MultiTable, test_special_insert>( {} );
-#endif
-}
-#endif //if __TBB_INITIALIZER_LISTS_PRESENT
-
-template<Harness::StateTrackableBase::StateValue desired_state, typename T>
-void check_value_state(/* typename do_check_element_state =*/ tbb::internal::true_type, T const& t, const char* filename, int line )
-{
- ASSERT_CUSTOM(is_state_f<desired_state>()(t), "", filename, line);
-}
-
-template<Harness::StateTrackableBase::StateValue desired_state, typename T>
-void check_value_state(/* typename do_check_element_state =*/ tbb::internal::false_type, T const&, const char* , int ) {/*do nothing*/}
-#define ASSERT_VALUE_STATE(do_check_element_state,state,value) check_value_state<state>(do_check_element_state,value,__FILE__,__LINE__)
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-template<typename T, typename do_check_element_state, typename V>
-void test_rvalue_insert(V v1, V v2)
-{
- typedef T container_t;
-
- container_t cont;
-
- std::pair<typename container_t::iterator, bool> ins = cont.insert(Value<container_t>::make(v1));
- ASSERT(ins.second == true && Value<container_t>::get(*(ins.first)) == v1, "Element 1 has not been inserted properly");
- ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::MoveInitialized,*ins.first);
-
- typename container_t::iterator it2 = cont.insert(ins.first, Value<container_t>::make(v2));
- ASSERT(Value<container_t>::get(*(it2)) == v2, "Element 2 has not been inserted properly");
- ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::MoveInitialized,*it2);
-
-}
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-// The test does not use variadic templates, but emplace() does.
-
-namespace emplace_helpers {
-template<typename container_t, typename arg_t, typename value_t>
-std::pair<typename container_t::iterator, bool> call_emplace_impl(container_t& c, arg_t&& k, value_t *){
- // this is a set
- return c.emplace(std::forward<arg_t>(k));
-}
-
-template<typename container_t, typename arg_t, typename first_t, typename second_t>
-std::pair<typename container_t::iterator, bool> call_emplace_impl(container_t& c, arg_t&& k, std::pair<first_t, second_t> *){
- // this is a map
- return c.emplace(k, std::forward<arg_t>(k));
-}
-
-template<typename container_t, typename arg_t>
-std::pair<typename container_t::iterator, bool> call_emplace(container_t& c, arg_t&& k){
- typename container_t::value_type * selector = NULL;
- return call_emplace_impl(c, std::forward<arg_t>(k), selector);
-}
-
-template<typename container_t, typename arg_t, typename value_t>
-typename container_t::iterator call_emplace_hint_impl(container_t& c, typename container_t::const_iterator hint, arg_t&& k, value_t *){
- // this is a set
- return c.emplace_hint(hint, std::forward<arg_t>(k));
-}
-
-template<typename container_t, typename arg_t, typename first_t, typename second_t>
-typename container_t::iterator call_emplace_hint_impl(container_t& c, typename container_t::const_iterator hint, arg_t&& k, std::pair<first_t, second_t> *){
- // this is a map
- return c.emplace_hint(hint, k, std::forward<arg_t>(k));
-}
-
-template<typename container_t, typename arg_t>
-typename container_t::iterator call_emplace_hint(container_t& c, typename container_t::const_iterator hint, arg_t&& k){
- typename container_t::value_type * selector = NULL;
- return call_emplace_hint_impl(c, hint, std::forward<arg_t>(k), selector);
-}
-}
-template<typename T, typename do_check_element_state, typename V>
-void test_emplace_insert(V v1, V v2){
- typedef T container_t;
- container_t cont;
-
- std::pair<typename container_t::iterator, bool> ins = emplace_helpers::call_emplace(cont, v1);
- ASSERT(ins.second == true && Value<container_t>::compare(ins.first, v1), "Element 1 has not been inserted properly");
- ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::DirectInitialized,*ins.first);
-
- typename container_t::iterator it2 = emplace_helpers::call_emplace_hint(cont, ins.first, v2);
- ASSERT(Value<container_t>::compare(it2, v2), "Element 2 has not been inserted properly");
- ASSERT_VALUE_STATE(do_check_element_state(),Harness::StateTrackableBase::DirectInitialized,*it2);
-}
-#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-#endif // __TBB_CPP11_RVALUE_REF_PRESENT
-
-template<typename T, typename do_check_element_state>
-void test_basic(const char * str, do_check_element_state)
-{
+void test_unordered_methods(){
T cont;
- const T &ccont(cont);
-
- // bool empty() const;
- ASSERT(ccont.empty(), "Concurrent container is not empty after construction");
-
- // size_type size() const;
- ASSERT(ccont.size() == 0, "Concurrent container is not empty after construction");
-
- // size_type max_size() const;
- ASSERT(ccont.max_size() > 0, "Concurrent container max size is invalid");
-
- //iterator begin();
- //iterator end();
- ASSERT(cont.begin() == cont.end(), "Concurrent container iterators are invalid after construction");
- ASSERT(ccont.begin() == ccont.end(), "Concurrent container iterators are invalid after construction");
- ASSERT(cont.cbegin() == cont.cend(), "Concurrent container iterators are invalid after construction");
-
- //std::pair<iterator, bool> insert(const value_type& obj);
- std::pair<typename T::iterator, bool> ins = cont.insert(Value<T>::make(1));
- ASSERT(ins.second == true && Value<T>::get(*(ins.first)) == 1, "Element 1 has not been inserted properly");
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
- test_rvalue_insert<T,do_check_element_state>(1,2);
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- test_emplace_insert<T,do_check_element_state>(1,2);
-#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-#endif // __TBB_CPP11_RVALUE_REF_PRESENT
-
- // bool empty() const;
- ASSERT(!ccont.empty(), "Concurrent container is empty after adding an element");
-
- // size_type size() const;
- ASSERT(ccont.size() == 1, "Concurrent container size is incorrect");
-
- std::pair<typename T::iterator, bool> ins2 = cont.insert(Value<T>::make(1));
-
- if (T::allow_multimapping)
- {
- // std::pair<iterator, bool> insert(const value_type& obj);
- ASSERT(ins2.second == true && Value<T>::get(*(ins2.first)) == 1, "Element 1 has not been inserted properly");
-
- // size_type size() const;
- ASSERT(ccont.size() == 2, "Concurrent container size is incorrect");
-
- // size_type count(const key_type& k) const;
- ASSERT(ccont.count(1) == 2, "Concurrent container count(1) is incorrect");
-
- // std::pair<iterator, iterator> equal_range(const key_type& k);
- std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1);
- typename T::iterator it = range.first;
- ASSERT(it != cont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- unsigned int count = 0;
- for (; it != range.second; it++)
- {
- count++;
- ASSERT(Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- }
-
- ASSERT(count == 2, "Range doesn't have the right number of elements");
- }
- else
- {
- // std::pair<iterator, bool> insert(const value_type& obj);
- ASSERT(ins2.second == false && ins2.first == ins.first, "Element 1 should not be re-inserted");
-
- // size_type size() const;
- ASSERT(ccont.size() == 1, "Concurrent container size is incorrect");
-
- // size_type count(const key_type& k) const;
- ASSERT(ccont.count(1) == 1, "Concurrent container count(1) is incorrect");
-
- // std::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
- // std::pair<iterator, iterator> equal_range(const key_type& k);
- std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1);
- typename T::iterator it = range.first;
- ASSERT(it != cont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- ASSERT(++it == range.second, "Range doesn't have the right number of elements");
- }
-
- // const_iterator find(const key_type& k) const;
- // iterator find(const key_type& k);
- typename T::iterator it = cont.find(1);
- ASSERT(it != cont.end() && Value<T>::get(*(it)) == 1, "Element 1 has not been found properly");
- ASSERT(ccont.find(1) == it, "Element 1 has not been found properly");
-
- // iterator insert(const_iterator hint, const value_type& obj);
- typename T::iterator it2 = cont.insert(ins.first, Value<T>::make(2));
- ASSERT(Value<T>::get(*it2) == 2, "Element 2 has not been inserted properly");
-
- // T(const T& _Umap)
- T newcont = ccont;
- ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Copy construction has not copied the elements properly");
-
- // size_type unsafe_erase(const key_type& k);
- typename T::size_type size = cont.unsafe_erase(1);
- ASSERT(T::allow_multimapping ? (size == 2) : (size == 1), "Erase has not removed the right number of elements");
-
- // iterator unsafe_erase(const_iterator position);
- typename T::iterator it4 = cont.unsafe_erase(cont.find(2));
- ASSERT(it4 == cont.end() && cont.size() == 0, "Erase has not removed the last element properly");
-
- // template<class InputIterator> void insert(InputIterator first, InputIterator last);
- cont.insert(newcont.begin(), newcont.end());
- ASSERT(T::allow_multimapping ? (cont.size() == 3) : (cont.size() == 2), "Range insert has not copied the elements properly");
-
- // iterator unsafe_erase(const_iterator first, const_iterator last);
- std::pair<typename T::iterator, typename T::iterator> range2 = newcont.equal_range(1);
- newcont.unsafe_erase(range2.first, range2.second);
- ASSERT(newcont.size() == 1, "Range erase has not erased the elements properly");
-
- // void clear();
- newcont.clear();
- ASSERT(newcont.begin() == newcont.end() && newcont.size() == 0, "Clear has not cleared the container");
-
-#if __TBB_INITIALIZER_LISTS_PRESENT
-#if __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
- REPORT("Known issue: the test for insert with initializer_list is skipped.\n");
-#else
- // void insert(const std::initializer_list<value_type> &il);
- newcont.insert( { Value<T>::make( 1 ), Value<T>::make( 2 ), Value<T>::make( 1 ) } );
- if (T::allow_multimapping) {
- ASSERT(newcont.size() == 3, "Concurrent container size is incorrect");
- ASSERT(newcont.count(1) == 2, "Concurrent container count(1) is incorrect");
- ASSERT(newcont.count(2) == 1, "Concurrent container count(2) is incorrect");
- std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1);
- it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- unsigned int count = 0;
- for (; it != range.second; it++) {
- count++;
- ASSERT(Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- }
- ASSERT(count == 2, "Range doesn't have the right number of elements");
- range = newcont.equal_range(2); it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 2, "Element 2 has not been found properly");
- count = 0;
- for (; it != range.second; it++) {
- count++;
- ASSERT(Value<T>::get(*it) == 2, "Element 2 has not been found properly");
- }
- ASSERT(count == 1, "Range doesn't have the right number of elements");
- } else {
- ASSERT(newcont.size() == 2, "Concurrent container size is incorrect");
- ASSERT(newcont.count(1) == 1, "Concurrent container count(1) is incorrect");
- ASSERT(newcont.count(2) == 1, "Concurrent container count(2) is incorrect");
- std::pair<typename T::iterator, typename T::iterator> range = newcont.equal_range(1);
- it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 1, "Element 1 has not been found properly");
- ASSERT(++it == range.second, "Range doesn't have the right number of elements");
- range = newcont.equal_range(2); it = range.first;
- ASSERT(it != newcont.end() && Value<T>::get(*it) == 2, "Element 2 has not been found properly");
- ASSERT(++it == range.second, "Range doesn't have the right number of elements");
- }
-#endif /* __TBB_CPP11_INIT_LIST_TEMP_OBJS_COMPILATION_BROKEN */
-#endif /* __TBB_INITIALIZER_LISTS_PRESENT */
-
- // T& operator=(const T& _Umap)
- newcont = ccont;
- ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Assignment operator has not copied the elements properly");
-
+ cont.insert(Value<T>::make(1));
+ cont.insert(Value<T>::make(2));
+    // Unordered-specific methods:
// void rehash(size_type n);
- newcont.rehash(16);
- ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Rehash should not affect the container elements");
+ cont.rehash(16);
// float load_factor() const;
// float max_load_factor() const;
- ASSERT(ccont.load_factor() <= ccont.max_load_factor(), "Load factor is invalid");
+ ASSERT(cont.load_factor() <= cont.max_load_factor(), "Load factor is invalid");
// void max_load_factor(float z);
cont.max_load_factor(16.0f);
- ASSERT(ccont.max_load_factor() == 16.0f, "Max load factor has not been changed properly");
+ ASSERT(cont.max_load_factor() == 16.0f, "Max load factor has not been changed properly");
// hasher hash_function() const;
- ccont.hash_function();
+ cont.hash_function();
// key_equal key_eq() const;
- ccont.key_eq();
+ cont.key_eq();
cont.clear();
- CheckAllocatorA(cont, 1, 0); // one dummy is always allocated
+ CheckEmptyContainerAllocatorA(cont, 1, 0); // one dummy is always allocated
for (int i = 0; i < 256; i++)
{
std::pair<typename T::iterator, bool> ins3 = cont.insert(Value<T>::make(i));
ASSERT(ins3.second == true && Value<T>::get(*(ins3.first)) == i, "Element 1 has not been inserted properly");
}
ASSERT(cont.size() == 256, "Wrong number of elements have been inserted");
- ASSERT((256 == CheckRecursiveRange<T,typename T::iterator>(cont.range()).first), NULL);
- ASSERT((256 == CheckRecursiveRange<T,typename T::const_iterator>(ccont.range()).first), NULL);
-
// size_type unsafe_bucket_count() const;
- ASSERT(ccont.unsafe_bucket_count() == 16, "Wrong number of buckets");
+ ASSERT(cont.unsafe_bucket_count() == 16, "Wrong number of buckets");
// size_type unsafe_max_bucket_count() const;
- ASSERT(ccont.unsafe_max_bucket_count() > 65536, "Wrong max number of buckets");
+ ASSERT(cont.unsafe_max_bucket_count() > 65536, "Wrong max number of buckets");
for (unsigned int i = 0; i < 256; i++)
{
- typename T::size_type buck = ccont.unsafe_bucket(i);
+ typename T::size_type buck = cont.unsafe_bucket(i);
// size_type unsafe_bucket(const key_type& k) const;
ASSERT(buck < 16, "Wrong bucket mapping");
}
ASSERT(bucketSizeSum == 256, "sum of bucket counts incorrect");
ASSERT(iteratorSizeSum == 256, "sum of iterator counts incorrect");
+}
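// --- Editor's illustration (not part of the patch) ----------------------------------
// A minimal standalone sketch of the unordered-specific observers exercised above,
// shown here on tbb::concurrent_unordered_set<int>. It relies only on the public
// rehash/load-factor/bucket API the test verifies; exact bucket counts are an
// implementation detail, so only the documented invariants are asserted.
#include <cassert>
#include "tbb/concurrent_unordered_set.h"

void unordered_observers_sketch() {
    tbb::concurrent_unordered_set<int> s;
    s.rehash(16);                                    // request at least 16 buckets
    s.max_load_factor(16.0f);                        // allow up to 16 elements per bucket
    for (int i = 0; i < 256; ++i) s.insert(i);
    assert(s.load_factor() <= s.max_load_factor());  // the invariant checked above
    assert(s.unsafe_bucket(0) < s.unsafe_bucket_count());  // keys map to valid buckets
}
// -------------------------------------------------------------------------------------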
- // void swap(T&);
- cont.swap(newcont);
- ASSERT(newcont.size() == 256, "Wrong number of elements after swap");
- ASSERT(newcont.count(200) == 1, "Element with key 200 is not present after swap");
- ASSERT(newcont.count(16) == 1, "Element with key 16 is not present after swap");
- ASSERT(newcont.count(99) == 1, "Element with key 99 is not present after swap");
- ASSERT(T::allow_multimapping ? (cont.size() == 3) : (cont.size() == 2), "Wrong number of elements after swap");
-
- REMARK("passed -- basic %s tests\n", str);
-
-#if defined (VERBOSE)
- REMARK("container dump debug:\n");
- cont._Dump();
- REMARK("container dump release:\n");
- cont.dump();
- REMARK("\n");
-#endif
-
- SpecialTests<T>::Test(str);
+template<typename T, typename do_check_element_state>
+void test_basic(const char * str, do_check_element_state)
+{
+ test_basic_common<T>(str, do_check_element_state());
+ test_unordered_methods<T>();
}
template<typename T>
void test_basic(const char * str){
- test_basic<T>(str, tbb::internal::false_type());
-}
-
-void test_machine() {
- ASSERT(__TBB_ReverseByte(0)==0, NULL );
- ASSERT(__TBB_ReverseByte(1)==0x80, NULL );
- ASSERT(__TBB_ReverseByte(0xFE)==0x7F, NULL );
- ASSERT(__TBB_ReverseByte(0xFF)==0xFF, NULL );
+ test_basic_common<T>(str, tbb::internal::false_type());
+ test_unordered_methods<T>();
}
template<typename T>
-class FillTable: NoAssign {
- T &table;
- const int items;
- bool my_asymptotic;
- typedef std::pair<typename T::iterator, bool> pairIB;
-public:
- FillTable(T &t, int i, bool asymptotic) : table(t), items(i), my_asymptotic(asymptotic) {
- ASSERT( !(items&1) && items > 100, NULL);
- }
- void operator()(int threadn) const {
- if( threadn == 0 ) { // Fill even keys forward (single thread)
- bool last_inserted = true;
- for( int i = 0; i < items; i+=2 ) {
- pairIB pib = table.insert(Value<T>::make(my_asymptotic?1:i));
- ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic?1:i), "Element not properly inserted");
-            ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this one was" );
- last_inserted = pib.second;
- }
- } else if( threadn == 1 ) { // Fill even keys backward (single thread)
- bool last_inserted = true;
- for( int i = items-2; i >= 0; i-=2 ) {
- pairIB pib = table.insert(Value<T>::make(my_asymptotic?1:i));
- ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic?1:i), "Element not properly inserted");
-            ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this one was" );
- last_inserted = pib.second;
- }
- } else if( !(threadn&1) ) { // Fill odd keys forward (multiple threads)
- for( int i = 1; i < items; i+=2 )
-#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
- if ( i % 32 == 1 && i + 6 < items ) {
- if (my_asymptotic) {
- table.insert({ Value<T>::make(1), Value<T>::make(1), Value<T>::make(1) });
- ASSERT(Value<T>::get(*table.find(1)) == 1, "Element not properly inserted");
- }
- else {
- table.insert({ Value<T>::make(i), Value<T>::make(i + 2), Value<T>::make(i + 4) });
- ASSERT(Value<T>::get(*table.find(i)) == i, "Element not properly inserted");
- ASSERT(Value<T>::get(*table.find(i + 2)) == i + 2, "Element not properly inserted");
- ASSERT(Value<T>::get(*table.find(i + 4)) == i + 4, "Element not properly inserted");
- }
- i += 4;
- } else
-#endif
- {
- pairIB pib = table.insert(Value<T>::make(my_asymptotic ? 1 : i));
- ASSERT(Value<T>::get(*(pib.first)) == (my_asymptotic ? 1 : i), "Element not properly inserted");
- }
- } else { // Check odd keys backward (multiple threads)
- if (!my_asymptotic) {
- bool last_found = false;
- for( int i = items-1; i >= 0; i-=2 ) {
- typename T::iterator it = table.find(i);
- if( it != table.end() ) { // found
- ASSERT(Value<T>::get(*it) == i, "Element not properly inserted");
- last_found = true;
- } else ASSERT( !last_found, "Previous key was found but this is not" );
- }
- }
- }
- }
-};
-
-typedef tbb::atomic<unsigned char> AtomicByte;
-
-template<typename ContainerType, typename RangeType>
-struct ParallelTraverseBody: NoAssign {
- const int n;
- AtomicByte* const array;
- ParallelTraverseBody( AtomicByte an_array[], int a_n ) :
- n(a_n), array(an_array)
- {}
- void operator()( const RangeType& range ) const {
- for( typename RangeType::iterator i = range.begin(); i!=range.end(); ++i ) {
- int k = static_cast<int>(Value<ContainerType>::key(*i));
- ASSERT( k == Value<ContainerType>::get(*i), NULL );
- ASSERT( 0<=k && k<n, NULL );
- array[k]++;
- }
- }
-};
-
-// if multimapping, oddCount is the value that each odd-indexed array element should have.
-// not meaningful for non-multimapped case.
-void CheckRange( AtomicByte array[], int n, bool allowMultiMapping, int oddCount ) {
- if(allowMultiMapping) {
- for( int k = 0; k<n; ++k) {
- if(k%2) {
- if( array[k] != oddCount ) {
- REPORT("array[%d]=%d (should be %d)\n", k, int(array[k]), oddCount);
- ASSERT(false,NULL);
- }
- }
- else {
- if(array[k] != 2) {
- REPORT("array[%d]=%d\n", k, int(array[k]));
- ASSERT(false,NULL);
- }
- }
- }
- }
- else {
- for( int k=0; k<n; ++k ) {
- if( array[k] != 1 ) {
- REPORT("array[%d]=%d\n", k, int(array[k]));
- ASSERT(false,NULL);
- }
- }
- }
+void test_concurrent(const char *tablename, bool asymptotic = false) {
+ test_concurrent_common<T>(tablename, asymptotic);
}
-template<typename T>
-class CheckTable: NoAssign {
- T &table;
-public:
- CheckTable(T &t) : NoAssign(), table(t) {}
- void operator()(int i) const {
- int c = (int)table.count( i );
- ASSERT( c, "must exist" );
- }
-};
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+struct unordered_move_traits_base {
+ enum{ expected_number_of_items_to_allocate_for_steal_move = 3 };
-template<typename T>
-void test_concurrent(const char *tablename, bool asymptotic = false) {
-#if TBB_USE_ASSERT
- int items = 2000;
-#else
- int items = 20000;
-#endif
- int nItemsInserted = 0;
- int nThreads = 0;
- T table(items/1000);
- #if __bgp__
- nThreads = 6;
- #else
- nThreads = 16;
- #endif
- if(T::allow_multimapping) {
- // even passes (threads 0 & 1) put N/2 items each
- // odd passes (threads > 1) put N/2 if thread is odd, else checks if even.
- items = 4*items / (nThreads + 2); // approximately same number of items inserted.
- nItemsInserted = items + (nThreads-2) * items / 4;
- }
- else {
- nItemsInserted = items;
+ template <typename unordered_type, typename iterator_type>
+ static unordered_type& construct_container(tbb::aligned_space<unordered_type> & storage, iterator_type begin, iterator_type end){
+ new (storage.begin()) unordered_type(begin, end);
+ return * storage.begin();
}
- REMARK("%s items == %d\n", tablename, items);
- tbb::tick_count t0 = tbb::tick_count::now();
- NativeParallelFor( nThreads, FillTable<T>(table, items, asymptotic) );
- tbb::tick_count t1 = tbb::tick_count::now();
- REMARK( "time for filling '%s' by %d items = %g\n", tablename, table.size(), (t1-t0).seconds() );
- ASSERT( int(table.size()) == nItemsInserted, NULL);
-
- if(!asymptotic) {
- AtomicByte* array = new AtomicByte[items];
- memset( static_cast<void*>(array), 0, items*sizeof(AtomicByte) );
-
- typename T::range_type r = table.range();
- std::pair<intptr_t,intptr_t> p = CheckRecursiveRange<T,typename T::iterator>(r);
- ASSERT((nItemsInserted == p.first), NULL);
- tbb::parallel_for( r, ParallelTraverseBody<T, typename T::const_range_type>( array, items ));
- CheckRange( array, items, T::allow_multimapping, (nThreads - 1)/2 );
- const T &const_table = table;
- memset( static_cast<void*>(array), 0, items*sizeof(AtomicByte) );
- typename T::const_range_type cr = const_table.range();
- ASSERT((nItemsInserted == CheckRecursiveRange<T,typename T::const_iterator>(cr).first), NULL);
- tbb::parallel_for( cr, ParallelTraverseBody<T, typename T::const_range_type>( array, items ));
- CheckRange( array, items, T::allow_multimapping, (nThreads - 1) / 2 );
- delete[] array;
-
- tbb::parallel_for( 0, items, CheckTable<T>( table ) );
+ template <typename unordered_type, typename iterator_type, typename allocator_type>
+ static unordered_type& construct_container(tbb::aligned_space<unordered_type> & storage, iterator_type begin, iterator_type end, allocator_type const& a ){
+        size_t default_n_of_buckets = 8; // cannot use concurrent_unordered_base::n_of_buckets as it is inaccessible
+        new (storage.begin()) unordered_type(begin, end, default_n_of_buckets, typename unordered_type::hasher(), typename unordered_type::key_equal(), a);
+ return * storage.begin();
}
- table.clear();
- CheckAllocatorA(table, items+1, items); // one dummy is always allocated
-
-}
-
-// The helper to call a function only when a doCall == true.
-template <bool doCall> struct CallIf {
- template<typename FuncType> void operator() ( FuncType func ) const { func(); }
-};
-template <> struct CallIf<false> {
- template<typename FuncType> void operator()( FuncType ) const {}
-};
-
-#include <vector>
-#include <list>
-#include <algorithm>
+ template<typename unordered_type, typename iterator>
+ static bool equal(unordered_type const& c, iterator begin, iterator end){
+ bool equal_sizes = ( static_cast<size_t>(std::distance(begin, end)) == c.size() );
+ if (!equal_sizes)
+ return false;
-template <typename ValueType>
-class TestRange : NoAssign {
- const std::list<ValueType> &my_lst;
- std::vector< tbb::atomic<bool> > &my_marks;
-public:
- TestRange( const std::list<ValueType> &lst, std::vector< tbb::atomic<bool> > &marks ) : my_lst( lst ), my_marks( marks ) {
- std::fill( my_marks.begin(), my_marks.end(), false );
- }
- template <typename Range>
- void operator()( const Range &r ) const { doTestRange( r.begin(), r.end() ); }
- template<typename Iterator>
- void doTestRange( Iterator i, Iterator j ) const {
- for ( Iterator it = i; it != j; ) {
- Iterator prev_it = it++;
- typename std::list<ValueType>::const_iterator it2 = std::search( my_lst.begin(), my_lst.end(), prev_it, it, Harness::IsEqual() );
- ASSERT( it2 != my_lst.end(), NULL );
- typename std::list<ValueType>::difference_type dist = std::distance( my_lst.begin( ), it2 );
- ASSERT( !my_marks[dist], NULL );
- my_marks[dist] = true;
+ for (iterator it = begin; it != end; ++it ){
+ if (c.find( Value<unordered_type>::key(*it)) == c.end()){
+ return false;
+ }
}
+ return true;
}
};
+#endif /* __TBB_CPP11_RVALUE_REF_PRESENT*/
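// --- Editor's illustration (not part of the patch) ----------------------------------
// A hedged sketch of how a traits struct such as unordered_move_traits_base is meant
// to be consumed: the source container is placement-new'ed into tbb::aligned_space so
// the test controls its lifetime explicitly. The name sketch_move_construct is an
// assumption for illustration; the real drivers (TestMoveConstructor and friends)
// live in test_container_move_support.h.
#include <cassert>
#include <utility>
#include "tbb/aligned_space.h"

template <typename container_traits, typename container_type, typename iterator>
void sketch_move_construct(iterator begin, iterator end) {
    tbb::aligned_space<container_type> storage;
    // Build the source container in raw, properly aligned storage.
    container_type& src = container_traits::construct_container(storage, begin, end);
    container_type dst(std::move(src));   // the move constructor under test
    assert(container_traits::equal(dst, begin, end) && "moved-to container lost elements");
    storage.begin()->~container_type();   // destroy the moved-from source explicitly
}
// -------------------------------------------------------------------------------------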
#if __TBB_CPP11_SMART_POINTERS_PRESENT
-// For the sake of simplified testing, make unique_ptr implicitly convertible to/from the pointer
-namespace test {
- template<typename T>
- class unique_ptr : public std::unique_ptr<T> {
- public:
- typedef typename std::unique_ptr<T>::pointer pointer;
- unique_ptr( pointer p ) : std::unique_ptr<T>(p) {}
- operator pointer() const { return this->get(); }
- };
-}
-
namespace tbb {
    template<> class tbb_hash< test::unique_ptr<int> > {
public:
size_t operator()( const test::unique_ptr<int>& key ) const { return tbb_hasher( *key ); }
};
}
-#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
-
-template <bool, typename Table>
-void TestMapSpecificMethods( Table &, const typename Table::value_type & ) { /* do nothing for a common case */ }
-
-template <bool defCtorPresent, typename Table>
-class CheckValue : NoAssign {
- Table &my_c;
-public:
- CheckValue( Table &c ) : my_c( c ) {}
- void operator()( const typename Table::value_type &value ) {
- typedef typename Table::iterator Iterator;
- typedef typename Table::const_iterator ConstIterator;
- const Table &constC = my_c;
- ASSERT( my_c.count( Value<Table>::key( value ) ) == 1, NULL );
- // find
- ASSERT( Harness::IsEqual()(*my_c.find( Value<Table>::key( value ) ), value), NULL );
- ASSERT( Harness::IsEqual()(*constC.find( Value<Table>::key( value ) ), value), NULL );
- // erase
- ASSERT( my_c.unsafe_erase( Value<Table>::key( value ) ), NULL );
- ASSERT( my_c.count( Value<Table>::key( value ) ) == 0, NULL );
- // insert
- std::pair<Iterator, bool> res = my_c.insert( value );
- ASSERT( Harness::IsEqual()(*res.first, value), NULL );
- ASSERT( res.second, NULL);
- // erase
- Iterator it = res.first;
- it++;
- ASSERT( my_c.unsafe_erase( res.first ) == it, NULL );
- // insert
- ASSERT( Harness::IsEqual()(*my_c.insert( my_c.begin(), value ), value), NULL );
- // equal_range
- std::pair<Iterator, Iterator> r1 = my_c.equal_range( Value<Table>::key( value ) );
- ASSERT( Harness::IsEqual()(*r1.first, value) && ++r1.first == r1.second, NULL );
- std::pair<ConstIterator, ConstIterator> r2 = constC.equal_range( Value<Table>::key( value ) );
- ASSERT( Harness::IsEqual()(*r2.first, value) && ++r2.first == r2.second, NULL );
- TestMapSpecificMethods<defCtorPresent>( my_c, value );
- }
-};
-
-#include "tbb/task_scheduler_init.h"
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT
-#include "test_container_move_support.h"
-
-struct unordered_move_traits_base {
- enum{ expected_number_of_items_to_allocate_for_steal_move = 3 };
-
- template <typename unordered_type, typename iterator_type>
- static unordered_type& construct_container(tbb::aligned_space<unordered_type> & storage, iterator_type begin, iterator_type end){
- new (storage.begin()) unordered_type(begin, end);
- return * storage.begin();
- }
-
- template <typename unordered_type, typename iterator_type, typename allocator_type>
- static unordered_type& construct_container(tbb::aligned_space<unordered_type> & storage, iterator_type begin, iterator_type end, allocator_type const& a ){
-        size_t default_n_of_buckets = 8; // cannot use concurrent_unordered_base::n_of_buckets as it is inaccessible
-        new (storage.begin()) unordered_type(begin, end, default_n_of_buckets, typename unordered_type::hasher(), typename unordered_type::key_equal(), a);
- return * storage.begin();
- }
-
- template<typename unordered_type, typename iterator>
- static bool equal(unordered_type const& c, iterator begin, iterator end){
- bool equal_sizes = ( static_cast<size_t>(std::distance(begin, end)) == c.size() );
- if (!equal_sizes)
- return false;
-
- for (iterator it = begin; it != end; ++it ){
- if (c.find( Value<unordered_type>::key(*it)) == c.end()){
- return false;
- }
- }
- return true;
- }
-};
-
-template<typename container_traits>
-void test_rvalue_ref_support(const char* container_name){
- TestMoveConstructor<container_traits>();
- TestMoveAssignOperator<container_traits>();
-#if TBB_USE_EXCEPTIONS
- TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure<container_traits>();
- TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor<container_traits>();
-#endif //TBB_USE_EXCEPTIONS
- REMARK("passed -- %s move support tests\n", container_name);
-}
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
+#endif /*__TBB_CPP11_SMART_POINTERS_PRESENT*/
template <bool defCtorPresent, typename Table>
-void Examine( Table c, const std::list<typename Table::value_type> &lst ) {
- typedef typename Table::size_type SizeType;
+void CustomExamine( Table c, const std::list<typename Table::value_type> &lst ) {
typedef typename Table::value_type ValueType;
-
- ASSERT( !c.empty() && c.size() == lst.size() && c.max_size() >= c.size(), NULL );
-
- std::for_each( lst.begin(), lst.end(), CheckValue<defCtorPresent, Table>( c ) );
-
- std::vector< tbb::atomic<bool> > marks( lst.size() );
-
- TestRange<ValueType>( lst, marks ).doTestRange( c.begin(), c.end() );
- ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
-
- TestRange<ValueType>( lst, marks ).doTestRange( c.begin(), c.end() );
- ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
-
+ typedef typename Table::size_type SizeType;
const Table constC = c;
- ASSERT( c.size() == constC.size(), NULL );
-
- TestRange<ValueType>( lst, marks ).doTestRange( constC.cbegin(), constC.cend() );
- ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
-
- tbb::task_scheduler_init init;
-
- tbb::parallel_for( c.range(), TestRange<ValueType>( lst, marks ) );
- ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
-
- tbb::parallel_for( constC.range( ), TestRange<ValueType>( lst, marks ) );
- ASSERT( std::find( marks.begin(), marks.end(), false ) == marks.end(), NULL );
const SizeType bucket_count = c.unsafe_bucket_count();
ASSERT( c.unsafe_max_bucket_count() >= bucket_count, NULL );
}
ASSERT( counter == lst.size(), NULL );
- typedef typename Table::value_type value_type;
- for ( typename std::list<value_type>::const_iterator it = lst.begin(); it != lst.end(); ) {
+ for ( typename std::list<ValueType>::const_iterator it = lst.begin(); it != lst.end(); ) {
const SizeType index = c.unsafe_bucket( Value<Table>::key( *it ) );
- typename std::list<value_type>::const_iterator prev_it = it++;
+ typename std::list<ValueType>::const_iterator prev_it = it++;
ASSERT( std::search( c.unsafe_begin( index ), c.unsafe_end( index ), prev_it, it, Harness::IsEqual() ) != c.unsafe_end( index ), NULL );
}
ASSERT( c.unsafe_bucket_count() > bucket_count, NULL );
ASSERT( c.load_factor() <= c.max_load_factor(), NULL );
- c.max_load_factor( 1.0f );
-
- Table c2;
- typename std::list<value_type>::const_iterator begin5 = lst.begin();
- std::advance( begin5, 5 );
- c2.insert( lst.begin(), begin5 );
- std::for_each( lst.begin(), begin5, CheckValue<defCtorPresent, Table>( c2 ) );
-
- c2.swap( c );
- ASSERT( c2.size() == lst.size(), NULL );
- ASSERT( c.size() == 5, NULL );
- std::for_each( lst.begin(), lst.end(), CheckValue<defCtorPresent, Table>( c2 ) );
-
- c2.clear();
- ASSERT( c2.size() == 0, NULL );
-
- typename Table::allocator_type a = c.get_allocator();
- value_type *ptr = a.allocate( 1 );
- ASSERT( ptr, NULL );
- a.deallocate( ptr, 1 );
+ c.max_load_factor( 1.0f );
c.hash_function();
c.key_eq();
}
+template <bool defCtorPresent, typename Table>
+void Examine( Table c, const std::list<typename Table::value_type> &lst) {
+ CommonExamine<defCtorPresent>(c, lst);
+ CustomExamine<defCtorPresent>(c, lst);
+}
+
template <bool defCtorPresent, typename Table, typename TableDebugAlloc>
void TypeTester( const std::list<typename Table::value_type> &lst ) {
ASSERT( lst.size() >= 5, "Array should have at least 5 elements" );
Table c8_hash_alloc( c1.begin(), c1.end(), initial_bucket_number, hasher, allocator );
Examine<defCtorPresent>( c8_hash_alloc, lst);
+ // Construction with an instance of non-default allocator
typename TableDebugAlloc::allocator_type a;
TableDebugAlloc c9( a );
c9.insert( c7.begin(), c7.end() );
Examine<defCtorPresent>( c9, lst );
}
-
-namespace test_select_size_t_constant{
-    __TBB_STATIC_ASSERT((tbb::internal::select_size_t_constant<1234,1234>::value == 1234),"select_size_t_constant::value is not a compile-time constant");
-// Two constants are used in the test: a 32-bit one and a 64-bit one.
-// The 64-bit constant is chosen so that its 32-bit halves add up to the 32-bit one (the first constant used in the test).
-// % ~0U is used to sum up the 32-bit halves of the 64-bit constant. ("% ~0U" essentially adds the 32-bit "digits", like "%9" adds
-// the digits (modulo 9) of a number in base 10).
-// So iff select_size_t_constant is correct, the result of the calculation below will be the same on both 32-bit and 64-bit platforms.
-    __TBB_STATIC_ASSERT((tbb::internal::select_size_t_constant<0x12345678U,0x091A2B3C091A2B3CULL>::value % ~0U == 0x12345678U),
-        "select_size_t_constant has chosen the wrong constant");
-}
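// --- Editor's illustration (not part of the patch) ----------------------------------
// A quick numeric check of the "digit sum" argument above: the 64-bit constant
// 0x091A2B3C091A2B3CULL has two identical 32-bit halves, and doubling 0x091A2B3C
// gives exactly 0x12345678, so reducing the 64-bit value modulo ~0U (i.e. 2^32 - 1)
// reproduces the 32-bit constant, while on a 32-bit platform the selected value is
// already 0x12345678; that is why the assertion holds on both.
static_assert((0x091A2B3CULL + 0x091A2B3CULL) == 0x12345678ULL,
              "halves of the 64-bit test constant must add up to the 32-bit one");
// -------------------------------------------------------------------------------------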
-
-#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
-namespace node_handling{
- template<typename Handle>
- bool compare_handle_getters(
- const Handle& node, const std::pair<typename Handle::key_type, typename Handle::mapped_type>& expected
- ) {
- return node.key() == expected.first && node.mapped() == expected.second;
- }
-
- template<typename Handle>
- bool compare_handle_getters( const Handle& node, const typename Handle::value_type& value) {
- return node.value() == value;
- }
-
- template<typename Handle>
- void set_node_handle_value(
- Handle& node, const std::pair<typename Handle::key_type, typename Handle::mapped_type>& value
- ) {
- node.key() = value.first;
- node.mapped() = value.second;
- }
-
- template<typename Handle>
- void set_node_handle_value( Handle& node, const typename Handle::value_type& value) {
- node.value() = value;
- }
-
- template <typename node_type>
- void TestTraits() {
-        ASSERT( !std::is_copy_constructible<node_type>::value,
-                "Node handle: Handle is copy constructible" );
-        ASSERT( !std::is_copy_assignable<node_type>::value,
-                "Node handle: Handle is copy assignable" );
-        ASSERT( std::is_move_constructible<node_type>::value,
-                "Node handle: Handle is not move constructible" );
-        ASSERT( std::is_move_assignable<node_type>::value,
-                "Node handle: Handle is not move assignable" );
-        ASSERT( std::is_default_constructible<node_type>::value,
-                "Node handle: Handle is not default constructible" );
-        ASSERT( std::is_destructible<node_type>::value,
-                "Node handle: Handle is not destructible" );
- }
-
- template <typename Table>
- void TestHandle( Table test_table ) {
-        ASSERT( test_table.size()>1, "Node handle: Container must contain 2 or more elements" );
- // Initialization
- using node_type = typename Table::node_type;
-
- TestTraits<node_type>();
-
- // Default Ctor and empty function
- node_type nh;
- ASSERT( nh.empty(), "Node handle: Node is not empty after initialization" );
-
- // Move Assign
- // key/mapped/value function
- auto expected_value = *test_table.begin();
-
-        nh = test_table.unsafe_extract(test_table.begin());
-        ASSERT( !nh.empty(), "Node handle: Node handle is empty after valid move assignment" );
-        ASSERT( compare_handle_getters(nh,expected_value),
-                "Node handle: After valid move assignment "
-                "node handle does not contain expected value");
-
-        // Move Ctor
-        // key/mapped/value function
-        node_type nh2(std::move(nh));
-        ASSERT( nh.empty(), "Node handle: Source node handle is not empty after valid move construction" );
-        ASSERT( !nh2.empty(), "Node handle: After valid move construction "
-                "argument node handle was not moved" );
-        ASSERT( compare_handle_getters(nh2,expected_value),
-                "Node handle: After valid move construction "
-                "node handle does not contain expected value" );
-
- // Bool conversion
-        ASSERT( nh2, "Node handle: Wrong node handle bool conversion" );
-
- // Change key/mapped/value of node handle
- auto expected_value2 = *test_table.begin();
- set_node_handle_value(nh2, expected_value2);
- ASSERT( compare_handle_getters(nh2, expected_value2),
- "Node handle: Wrong node handle key/mapped/value changing behavior" );
-
- // Member/non member swap check
- node_type empty_node;
-        // Extract an extra element so that nh2 and nh3 hold different values
- test_table.unsafe_extract(test_table.begin());
- auto expected_value3 = *test_table.begin();
- node_type nh3(test_table.unsafe_extract(test_table.begin()));
-
- // Both of node handles are not empty
- nh3.swap(nh2);
- ASSERT( compare_handle_getters(nh3, expected_value2),
- "Node handle: Wrong node handle swap behavior" );
- ASSERT( compare_handle_getters(nh2, expected_value3),
- "Node handle: Wrong node handle swap behavior" );
-
- std::swap(nh2,nh3);
- ASSERT( compare_handle_getters(nh3, expected_value3),
- "Node handle: Wrong node handle swap behavior" );
- ASSERT( compare_handle_getters(nh2, expected_value2),
- "Node handle: Wrong node handle swap behavior" );
- ASSERT( !nh2.empty(), "Node handle: Wrong node handle swap behavior" );
- ASSERT( !nh3.empty(), "Node handle: Wrong node handle swap behavior" );
-
- // One of nodes is empty
- nh3.swap(empty_node);
- ASSERT( compare_handle_getters(std::move(empty_node), expected_value3),
- "Node handle: Wrong node handle swap behavior" );
- ASSERT( nh3.empty(), "Node handle: Wrong node handle swap behavior" );
-
- std::swap(empty_node, nh3);
- ASSERT( compare_handle_getters(std::move(nh3), expected_value3),
- "Node handle: Wrong node handle swap behavior" );
- ASSERT( empty_node.empty(), "Node handle: Wrong node handle swap behavior" );
-
- empty_node.swap(nh3);
- ASSERT( compare_handle_getters(std::move(empty_node), expected_value3),
- "Node handle: Wrong node handle swap behavior" );
- ASSERT( nh3.empty(), "Node handle: Wrong node handle swap behavior" );
- }
-
- template <typename Table>
- typename Table::node_type GenerateNodeHandle(const typename Table::value_type& value) {
- Table temp_table;
- temp_table.insert(value);
- return temp_table.unsafe_extract(temp_table.cbegin());
- }
-
- // overload for multitable or insertion with hint iterator
- template <typename Table>
- void InsertAssertion( const Table& table,
- const typename Table::iterator& result,
- bool,
- const typename Table::value_type* node_value = nullptr ) {
-        if (node_value==nullptr) {
-            ASSERT( result==table.end(), "Insert: Result iterator does not "
-                                         "contain the end iterator after empty node insertion" );
-        } else {
-            if (!Table::allow_multimapping) {
-                ASSERT( result==table.find(Value<Table>::key( *node_value )) &&
-                        result != table.end(),
-                        "Insert: After node insertion result iterator"
-                        " does not point to an equal element in the table" );
-            } else {
-                ASSERT( *result==*node_value, "Insert: Result iterator contains "
-                                              "wrong content after successful insertion" );
-
- for (auto it = table.begin(); it != table.end(); ++it) {
- if (it == result) return;
- }
- ASSERT( false, "Insert: After successful insertion result "
- "iterator contains address that is not in the table" );
- }
- }
- }
-
- // Not multitable overload
- template <typename Table>
- void InsertAssertion( const Table& table,
- const std::pair<typename Table::iterator, bool>& result,
- bool is_existing_key,
- const typename Table::value_type* node_value = nullptr ) {
- // Empty node insertion
- if (node_value == nullptr) {
-            ASSERT( result.first == table.end(),
-                    "Insert: Returned iterator is not "
-                    "the end iterator after empty node insertion" );
-            ASSERT( !result.second,
-                    "Insert: Returned bool is true after empty node insertion" );
-        } else {
-            ASSERT( result.first == table.find(Value<Table>::key( *node_value )),
-                    "Insert: Returned iterator does not point "
-                    "to an equal node in the table after node insertion" );
-            ASSERT( result.second == (!is_existing_key || Table::allow_multimapping),
-                    "Insert: Returned bool has a wrong value after node insertion" );
-        }
- }
-
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
-    // Internal function for testing.
-    // The reference cannot be removed from the "Table" argument because the hint must point to an element of the table.
- namespace {
- template <typename Table, typename... Hint>
- void TestInsertOverloads( Table& table_to_insert,
- const typename Table::value_type &value, const Hint&... hint ) {
- // Insert empty element
- typename Table::node_type nh;
-
- auto table_size = table_to_insert.size();
- auto result = table_to_insert.insert(hint..., std::move(nh));
- InsertAssertion(table_to_insert, result, /*is_existing_key*/ false);
- ASSERT( table_to_insert.size() == table_size,
- "Insert: After empty node insertion table size changed" );
-
-            // Standard insertion
- nh = GenerateNodeHandle<Table>(value);
-
- result = table_to_insert.insert(hint..., std::move(nh));
- ASSERT( nh.empty(), "Insert: Not empty handle after successful insertion" );
- InsertAssertion(table_to_insert, result, /*is_existing_key*/ false, &value);
-
- // Insert existing node
- nh = GenerateNodeHandle<Table>(value);
-
- result = table_to_insert.insert(hint..., std::move(nh));
-
- InsertAssertion(table_to_insert, result, /*is_existing_key*/ true, &value);
-
- if (Table::allow_multimapping){
-            ASSERT( nh.empty(), "Insert: Failed insertion into multitable" );
-        } else {
-            ASSERT( !nh.empty() , "Insert: Empty handle after failed insertion" );
-            ASSERT( compare_handle_getters( std::move(nh), value ),
-                    "Insert: Existing data is not equal to the data being inserted" );
- }
- }
- }
-
- template <typename Table>
- void TestInsert( Table table, const typename Table::value_type &value) {
-        ASSERT( !table.empty(), "Insert: Map should contain 1 or more elements" );
- Table table_backup(table);
-
- TestInsertOverloads(table, value);
- TestInsertOverloads(table_backup, value, table.begin());
- }
-#endif /*__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT*/
-
- template <typename Table>
- void TestExtract( Table table_for_extract, typename Table::key_type new_key ) {
-        ASSERT( table_for_extract.size()>1, "Extract: Container must contain 2 or more elements" );
-        ASSERT( table_for_extract.find(new_key)==table_for_extract.end(),
-                "Extract: Table must not contain the new element!");
-
- // Extract new element
- auto nh = table_for_extract.unsafe_extract(new_key);
- ASSERT( nh.empty(), "Extract: Node handle is not empty after wrong key extraction" );
-
- // Valid key extraction
- auto expected_value = *table_for_extract.cbegin();
- auto key = Value<Table>::key( expected_value );
- auto count = table_for_extract.count(key);
-
- nh = table_for_extract.unsafe_extract(key);
- ASSERT( !nh.empty(),
- "Extract: After successful extraction by key node handle is empty" );
- ASSERT( compare_handle_getters(std::move(nh), expected_value),
- "Extract: After successful extraction by key node handle contains wrong value" );
- ASSERT( table_for_extract.count(key) == count - 1,
- "Extract: After successful node extraction by key, table still contains this key" );
-
- // Valid iterator overload
- auto expected_value2 = *table_for_extract.cbegin();
- auto key2 = Value<Table>::key( expected_value2 );
- auto count2 = table_for_extract.count(key2);
-
- nh = table_for_extract.unsafe_extract(table_for_extract.cbegin());
- ASSERT( !nh.empty(),
- "Extract: After successful extraction by iterator node handle is empty" );
- ASSERT( compare_handle_getters(std::move(nh), expected_value2),
- "Extract: After successful extraction by iterator node handle contains wrong value" );
- ASSERT( table_for_extract.count(key2) == count2 - 1,
-                "Extract: After successful extraction the table still contains this element" );
- }
-
- // All test exclude merge
- template <typename Table>
- void NodeHandlingTests ( const Table& table,
- const typename Table::value_type& new_value) {
- TestHandle(table);
-#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
- TestInsert(table, new_value);
-#endif /*__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT*/
- TestExtract(table, Value<Table>::key( new_value ));
- }
-
- template <typename TableType1, typename TableType2>
- void TestMerge( TableType1 table1, TableType2&& table2 ) {
- using Table2PureType = typename std::decay<TableType2>::type;
- // Initialization
- TableType1 table1_backup = table1;
- // For copying lvalue
- Table2PureType table2_backup = table2;
-
- table1.merge(std::forward<TableType2>(table2));
- for (auto it: table2) {
- ASSERT( table1.find( Value<Table2PureType>::key( it ) ) != table1.end(),
-                    "Merge: Some key(s) were not merged" );
- }
-
-        // After the following step table1 will contain only the elements merged from table2
-        for (auto it: table1_backup) {
-            table1.unsafe_extract(Value<TableType1>::key( it ));
-        }
-        // After the following step table2_backup will contain only the elements merged from table2
- for (auto it: table2) {
- table2_backup.unsafe_extract(Value<Table2PureType>::key( it ));
- }
-
-        ASSERT ( table1.size() == table2_backup.size(), "Merge: Sizes of tables are not equal" );
- for (auto it: table2_backup) {
- ASSERT( table1.find( Value<Table2PureType>::key( it ) ) != table1.end(),
- "Merge: Wrong merge behavior" );
- }
- }
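// --- Editor's illustration (not part of the patch) ----------------------------------
// The merge semantics the helper above relies on, in miniature: for a non-multi map,
// merge() splices only nodes whose keys are absent from the target, and colliding
// nodes stay behind in the source. A minimal sketch (merge() is available when
// __TBB_UNORDERED_NODE_HANDLE_PRESENT):
#include <cassert>
#include "tbb/concurrent_unordered_map.h"

void merge_semantics_sketch() {
    tbb::concurrent_unordered_map<int, int> a, b;
    a.insert({1, 10});
    b.insert({1, 40});
    b.insert({9, 90});
    a.merge(b);  // {9,90} moves into a; {1,40} collides on key 1 and stays in b
    assert(a.size() == 2 && a.count(9) == 1);
    assert(b.size() == 1 && b.count(1) == 1);
}
// -------------------------------------------------------------------------------------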
-
- // Testing of rvalue and lvalue overloads
- template <typename TableType1, typename TableType2>
- void TestMergeOverloads( const TableType1& table1, TableType2 table2 ) {
- TableType2 table_backup(table2);
- TestMerge(table1, table2);
- TestMerge(table1, std::move(table_backup));
- }
-
- template <typename Table, typename MultiTable>
- void TestMergeTransposition( Table table1, Table table2,
- MultiTable multitable1, MultiTable multitable2 ) {
- Table empty_map;
- MultiTable empty_multimap;
-
- // Map transpositions
- node_handling::TestMergeOverloads(table1, table2);
- node_handling::TestMergeOverloads(table1, empty_map);
- node_handling::TestMergeOverloads(empty_map, table2);
-
- // Multimap transpositions
- node_handling::TestMergeOverloads(multitable1, multitable2);
- node_handling::TestMergeOverloads(multitable1, empty_multimap);
- node_handling::TestMergeOverloads(empty_multimap, multitable2);
-
- // Map/Multimap transposition
- node_handling::TestMergeOverloads(table1, multitable1);
- node_handling::TestMergeOverloads(multitable2, table2);
- }
-
- template <typename Table>
- void AssertionConcurrentMerge ( Table start_data, Table src_table, std::vector<Table> tables,
- std::true_type) {
- ASSERT( src_table.size() == start_data.size()*tables.size(),
- "Merge: Incorrect merge for some elements" );
-
- for(auto it: start_data) {
- ASSERT( src_table.count( Value<Table>::key( it ) ) ==
- start_data.count( Value<Table>::key( it ) )*tables.size(),
- "Merge: Incorrect merge for some element" );
- }
-
- for (size_t i = 0; i < tables.size(); i++) {
-            ASSERT( tables[i].empty(), "Merge: Some elements were not merged" );
- }
- }
-
- template <typename Table>
- void AssertionConcurrentMerge ( Table start_data, Table src_table, std::vector<Table> tables,
- std::false_type) {
- Table expected_result;
- for (auto table: tables)
- for (auto it: start_data) {
- // If we cannot find some element in some table, then it has been moved
- if (table.find( Value<Table>::key( it ) ) == table.end()){
- bool result = expected_result.insert( it ).second;
-                ASSERT( result, "Merge: Some element was merged twice or was not "
-                                "returned to its owner after unsuccessful merge");
- }
- }
-
- ASSERT( expected_result.size() == src_table.size() && start_data.size() == src_table.size(),
- "Merge: wrong size of result table");
- for (auto it: expected_result) {
- if ( src_table.find( Value<Table>::key( it ) ) != src_table.end() &&
- start_data.find( Value<Table>::key( it ) ) != start_data.end() ){
- src_table.unsafe_extract(Value<Table>::key( it ));
- start_data.unsafe_extract(Value<Table>::key( it ));
- } else {
- ASSERT( false, "Merge: Incorrect merge for some element" );
- }
- }
-
- ASSERT( src_table.empty()&&start_data.empty(), "Merge: Some elements were not merged" );
- }
-
- template <typename Table>
- void TestConcurrentMerge (const Table& table_data) {
- for (auto num_threads = MinThread + 1; num_threads <= MaxThread; num_threads++){
- std::vector<Table> tables;
- Table src_table;
-
- for (auto j = 0; j < num_threads; j++){
- tables.push_back(table_data);
- }
-
- NativeParallelFor( num_threads, [&](size_t index){ src_table.merge(tables[index]); } );
-
- AssertionConcurrentMerge( table_data, src_table, tables,
- std::integral_constant<bool,Table::allow_multimapping>{});
- }
- }
-}
-#endif /*__TBB_UNORDERED_NODE_HANDLE_PRESENT*/
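// --- Editor's illustration (not part of the patch) ----------------------------------
// Node-handle roundtrip in miniature, the operation the tests above (now shared via
// the common node_handling helpers) keep exercising: unsafe_extract() detaches a node
// without copying the element, and insert(node_type&&) splices that same node into
// another table. Requires __TBB_UNORDERED_NODE_HANDLE_PRESENT.
#include <cassert>
#include <utility>
#include "tbb/concurrent_unordered_map.h"

void node_handle_roundtrip_sketch() {
    tbb::concurrent_unordered_map<int, int> src, dst;
    src.insert({1, 10});
    auto nh = src.unsafe_extract(1);      // detach the node; src no longer owns it
    assert(!nh.empty() && src.empty());
    dst.insert(std::move(nh));            // splice the very same node into dst
    assert(dst.count(1) == 1 && nh.empty());
}
// -------------------------------------------------------------------------------------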
template <>
struct SpecialTests <MyMap> {
static void Test( const char *str ) {
- MyMap cont( 0 );
- const MyMap &ccont( cont );
-
- // mapped_type& operator[](const key_type& k);
- cont[1] = 2;
-
- // bool empty() const;
- ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
-
- // size_type size() const;
- ASSERT( ccont.size( ) == 1, "Concurrent container size incorrect" );
-
- ASSERT( cont[1] == 2, "Concurrent container value incorrect" );
-
- // mapped_type& at( const key_type& k );
- // const mapped_type& at(const key_type& k) const;
- ASSERT( cont.at( 1 ) == 2, "Concurrent container value incorrect" );
- ASSERT( ccont.at( 1 ) == 2, "Concurrent container value incorrect" );
-
- // iterator find(const key_type& k);
- MyMap::const_iterator it = cont.find( 1 );
- ASSERT( it != cont.end( ) && Value<MyMap>::get( *(it) ) == 2, "Element with key 1 not properly found" );
- cont.unsafe_erase( it );
- it = cont.find( 1 );
- ASSERT( it == cont.end( ), "Element with key 1 not properly erased" );
-
- REMARK( "passed -- specialized %s tests\n", str );
+ SpecialMapTests<MyMap>(str);
}
};
-void
-check_multimap(MyMultiMap &m, int *targets, int tcount, int key) {
- std::vector<bool> vfound(tcount,false);
- std::pair<MyMultiMap::iterator, MyMultiMap::iterator> range = m.equal_range( key );
- for(MyMultiMap::iterator it = range.first; it != range.second; ++it) {
- bool found = false;
- for( int i = 0; i < tcount; ++i) {
- if((*it).second == targets[i]) {
- if(!vfound[i]) { // we can insert duplicate values
- vfound[i] = found = true;
- break;
- }
- }
- }
- // just in case an extra value in equal_range...
- ASSERT(found, "extra value from equal range");
- }
- for(int i = 0; i < tcount; ++i) ASSERT(vfound[i], "missing value");
-}
-
template <>
struct SpecialTests <MyMultiMap> {
static void Test( const char *str ) {
- int one_values[] = { 7, 2, 13, 23, 13 };
- int zero_values[] = { 4, 9, 13, 29, 42, 111};
- int n_zero_values = sizeof(zero_values) / sizeof(int);
- int n_one_values = sizeof(one_values) / sizeof(int);
- MyMultiMap cont( 0 );
- const MyMultiMap &ccont( cont );
- // mapped_type& operator[](const key_type& k);
- cont.insert( std::make_pair( 1, one_values[0] ) );
-
- // bool empty() const;
- ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
-
- // size_type size() const;
- ASSERT( ccont.size( ) == 1, "Concurrent container size incorrect" );
- ASSERT( (*(cont.begin( ))).second == one_values[0], "Concurrent container value incorrect" );
- ASSERT( (*(cont.equal_range( 1 )).first).second == one_values[0], "Improper value from equal_range" );
- ASSERT( (cont.equal_range( 1 )).second == cont.end( ), "Improper iterator from equal_range" );
-
- cont.insert( std::make_pair( 1, one_values[1] ) );
-
- // bool empty() const;
- ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
-
- // size_type size() const;
- ASSERT( ccont.size( ) == 2, "Concurrent container size incorrect" );
- check_multimap(cont, one_values, 2, 1);
-
- // insert the other {1,x} values
- for( int i = 2; i < n_one_values; ++i ) {
- cont.insert( std::make_pair( 1, one_values[i] ) );
- }
-
- check_multimap(cont, one_values, n_one_values, 1);
- ASSERT( (cont.equal_range( 1 )).second == cont.end( ), "Improper iterator from equal_range" );
-
- cont.insert( std::make_pair( 0, zero_values[0] ) );
-
- // bool empty() const;
- ASSERT( !ccont.empty( ), "Concurrent container empty after adding an element" );
-
- // size_type size() const;
- ASSERT( ccont.size( ) == (size_t)(n_one_values+1), "Concurrent container size incorrect" );
- check_multimap(cont, one_values, n_one_values, 1);
- check_multimap(cont, zero_values, 1, 0);
- ASSERT( (*(cont.begin( ))).second == zero_values[0], "Concurrent container value incorrect" );
- // insert the rest of the zero values
- for( int i = 1; i < n_zero_values; ++i) {
- cont.insert( std::make_pair( 0, zero_values[i] ) );
- }
- check_multimap(cont, one_values, n_one_values, 1);
- check_multimap(cont, zero_values, n_zero_values, 0);
-
- // clear, reinsert interleaved
- cont.clear();
- int bigger_num = ( n_one_values > n_zero_values ) ? n_one_values : n_zero_values;
- for( int i = 0; i < bigger_num; ++i ) {
- if(i < n_one_values) cont.insert( std::make_pair( 1, one_values[i] ) );
- if(i < n_zero_values) cont.insert( std::make_pair( 0, zero_values[i] ) );
- }
- check_multimap(cont, one_values, n_one_values, 1);
- check_multimap(cont, zero_values, n_zero_values, 0);
-
-
- REMARK( "passed -- specialized %s tests\n", str );
+ SpecialMultiMapTests<MyMultiMap>(str);
}
};
-#if __TBB_RANGE_BASED_FOR_PRESENT
-#include "test_range_based_for.h"
-// Add the similar test for concurrent_unordered_set.
-void TestRangeBasedFor() {
- using namespace range_based_for_support_tests;
-
- REMARK( "testing range based for loop compatibility \n" );
- typedef tbb::concurrent_unordered_map<int, int> cu_map;
- cu_map a_cu_map;
- const int sequence_length = 100;
- for ( int i = 1; i <= sequence_length; ++i ) {
- a_cu_map.insert( cu_map::value_type( i, i ) );
- }
-
- ASSERT( range_based_for_accumulate( a_cu_map, pair_second_summer(), 0 ) == gauss_summ_of_int_sequence( sequence_length ), "incorrect accumulated value generated via range based for ?" );
-}
-#endif /* __TBB_RANGE_BASED_FOR_PRESENT */
-
#if __TBB_CPP11_RVALUE_REF_PRESENT
struct cu_map_type : unordered_move_traits_base {
template<typename element_type, typename allocator_type>
};
#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
-template <typename Table>
-class TestOperatorSquareBrackets : NoAssign {
- typedef typename Table::value_type ValueType;
- Table &my_c;
- const ValueType &my_value;
-public:
- TestOperatorSquareBrackets( Table &c, const ValueType &value ) : my_c( c ), my_value( value ) {}
- void operator()() const {
- ASSERT( Harness::IsEqual()(my_c[my_value.first], my_value.second), NULL );
- }
-};
-
template <bool defCtorPresent, typename Key, typename Element, typename Hasher, typename Equality, typename Allocator>
void TestMapSpecificMethods( tbb::concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator> &c,
const typename tbb::concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator>::value_type &value ) {
- typedef tbb::concurrent_unordered_map<Key, Element, Hasher, Equality, Allocator> Table;
- CallIf<defCtorPresent>()(TestOperatorSquareBrackets<Table>( c, value ));
- ASSERT( Harness::IsEqual()(c.at( value.first ), value.second), NULL );
- const Table &constC = c;
- ASSERT( Harness::IsEqual()(constC.at( value.first ), value.second), NULL );
-}
-
-template <bool defCtorPresent, typename ValueType>
-void TestTypesMap( const std::list<ValueType> &lst ) {
- typedef typename ValueType::first_type KeyType;
- typedef typename ValueType::second_type ElemType;
- TypeTester< defCtorPresent, tbb::concurrent_unordered_map<KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual>,
- tbb::concurrent_unordered_map< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual, debug_allocator<ValueType> > >( lst );
- TypeTester< defCtorPresent, tbb::concurrent_unordered_multimap<KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual>,
- tbb::concurrent_unordered_multimap< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual, debug_allocator<ValueType> > >( lst );
-}
-
-void TestTypes() {
- const int NUMBER = 10;
-
- std::list< std::pair<const int, int> > arrIntInt;
- for ( int i = 0; i < NUMBER; ++i ) arrIntInt.push_back( std::make_pair( i, NUMBER - i ) );
- TestTypesMap</*def_ctor_present = */true>( arrIntInt );
-
- std::list< std::pair< const int, tbb::atomic<int> > > arrIntTbb;
- for ( int i = 0; i < NUMBER; ++i ) {
- tbb::atomic<int> b;
- b = NUMBER - i;
- arrIntTbb.push_back( std::make_pair( i, b ) );
- }
- TestTypesMap</*defCtorPresent = */true>( arrIntTbb );
-
-#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN
- std::list< std::pair<const std::reference_wrapper<const int>, int> > arrRefInt;
- for ( std::list< std::pair<const int, int> >::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it )
- arrRefInt.push_back( std::make_pair( std::reference_wrapper<const int>( it->first ), it->second ) );
- TestTypesMap</*defCtorPresent = */true>( arrRefInt );
-
- std::list< std::pair<const int, std::reference_wrapper<int> > > arrIntRef;
- for ( std::list< std::pair<const int, int> >::iterator it = arrIntInt.begin(); it != arrIntInt.end(); ++it ) {
- // Using std::make_pair below causes compilation issues with early implementations of std::reference_wrapper.
- arrIntRef.push_back( std::pair<const int, std::reference_wrapper<int> >( it->first, std::reference_wrapper<int>( it->second ) ) );
+ TestMapSpecificMethodsImpl<defCtorPresent>(c, value);
}
- TestTypesMap</*defCtorPresent = */false>( arrIntRef );
-#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN */
-
-#if __TBB_CPP11_SMART_POINTERS_PRESENT
- std::list< std::pair< const std::shared_ptr<int>, std::shared_ptr<int> > > arrShrShr;
- for ( int i = 0; i < NUMBER; ++i ) {
- const int NUMBER_minus_i = NUMBER - i;
- arrShrShr.push_back( std::make_pair( std::make_shared<int>( i ), std::make_shared<int>( NUMBER_minus_i ) ) );
+
+struct UnorderedMapTypesTester{
+ template <bool defCtorPresent, typename ValueType>
+ void check( const std::list<ValueType> &lst ) {
+ typedef typename ValueType::first_type KeyType;
+ typedef typename ValueType::second_type ElemType;
+ TypeTester< defCtorPresent, tbb::concurrent_unordered_map< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual>,
+ tbb::concurrent_unordered_map< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual, debug_allocator<ValueType> > >( lst );
+ TypeTester< defCtorPresent, tbb::concurrent_unordered_multimap< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual>,
+ tbb::concurrent_unordered_multimap< KeyType, ElemType, tbb::tbb_hash<KeyType>, Harness::IsEqual, debug_allocator<ValueType> > >( lst );
}
- TestTypesMap</*defCtorPresent = */true>( arrShrShr );
+};
- std::list< std::pair< const std::weak_ptr<int>, std::weak_ptr<int> > > arrWkWk;
- std::copy( arrShrShr.begin(), arrShrShr.end(), std::back_inserter( arrWkWk ) );
- TestTypesMap</*defCtorPresent = */true>( arrWkWk );
+void TestTypes() {
+ TestMapCommonTypes<UnorderedMapTypesTester>();
-#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT
// Regression test for a problem with excessive requirements of emplace()
test_emplace_insert<tbb::concurrent_unordered_map< int*, test::unique_ptr<int> >,
tbb::internal::false_type>( new int, new int );
test_emplace_insert<tbb::concurrent_unordered_multimap< int*, test::unique_ptr<int> >,
tbb::internal::false_type>( new int, new int );
-#endif
-
-#else
- REPORT( "Known issue: C++11 smart pointer tests are skipped.\n" );
-#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
+ #endif /*__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT*/
}
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
}
#endif
-#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
-
-void TestMerge(){
- using Map = tbb::concurrent_unordered_map<int, int>;
- using MultiMap = tbb::concurrent_unordered_multimap<int, int>;
-
- Map map_for_merge1;
- map_for_merge1.insert({1, 10});
- map_for_merge1.insert({2, 20});
- map_for_merge1.insert({3, 30});
- Map map_for_merge2;
- map_for_merge2.insert({1, 40});
- map_for_merge2.insert({2, 20});
- map_for_merge2.insert({9, 90});
-
- MultiMap multimap_for_merge1;
- multimap_for_merge1.insert({1, 10});
- multimap_for_merge1.insert({1, 10});
- multimap_for_merge1.insert({2, 20});
- multimap_for_merge1.insert({3, 30});
- multimap_for_merge1.insert({4, 40});
- MultiMap multimap_for_merge2;
- multimap_for_merge2.insert({1, 10});
- multimap_for_merge2.insert({2, 50});
- multimap_for_merge2.insert({5, 60});
- multimap_for_merge2.insert({5, 70});
-
- node_handling::TestMergeTransposition(map_for_merge1, map_for_merge2,
- multimap_for_merge1, multimap_for_merge2);
-
- // Test merge with different hashers
- tbb::concurrent_unordered_map<int, int, degenerate_hash<int>> degenerate_hash_map;
- degenerate_hash_map.insert({1, 10});
- degenerate_hash_map.insert({2, 20});
- degenerate_hash_map.insert({9, 90});
-
- tbb::concurrent_unordered_multimap<int, int, degenerate_hash<int>> degenerate_hash_multimap;
- degenerate_hash_multimap.insert({1, 10});
- degenerate_hash_multimap.insert({2, 20});
- degenerate_hash_multimap.insert({5, 50});
- degenerate_hash_multimap.insert({5, 60});
- degenerate_hash_multimap.insert({6, 70});
-
- node_handling::TestMergeOverloads(map_for_merge1, degenerate_hash_map);
- node_handling::TestMergeOverloads(multimap_for_merge1, degenerate_hash_multimap);
-
- int size = 100000;
-
- Map map_for_merge3(size);
- for (int i = 0; i<size; i++){
- map_for_merge3.insert({i,i});
- }
- node_handling::TestConcurrentMerge(map_for_merge3);
-
- MultiMap multimap_for_merge3(size/2);
- for (int i = 0; i<size/2; i++){
- multimap_for_merge3.insert({i,i});
- multimap_for_merge3.insert({i,i});
- }
- node_handling::TestConcurrentMerge(multimap_for_merge3);
-}
-
-void TestNodeHandling() {
- tbb::concurrent_unordered_map<int, int> unordered_map;
- for (int i = 1; i<5; i++)
- unordered_map.insert({i,i*10});
- node_handling::NodeHandlingTests(unordered_map, /*new key for test_data*/{5,90});
-
- tbb::concurrent_unordered_multimap<int, int> unordered_multimap;
- for (int i = 1; i<5; i++)
- unordered_multimap.insert({i,i*10});
- unordered_multimap.insert({2, 30});
- node_handling::NodeHandlingTests(unordered_multimap, /*new key for test_data*/{5,90});
-}
-
-#endif /*__TBB_UNORDERED_NODE_HANDLE_PRESENT*/
-
int TestMain() {
test_machine();
#endif /* __TBB_INITIALIZER_LISTS_PRESENT */
#if __TBB_RANGE_BASED_FOR_PRESENT
- TestRangeBasedFor();
+ TestRangeBasedFor<MyMap>();
+ TestRangeBasedFor<MyMultiMap>();
#endif
#if __TBB_CPP11_RVALUE_REF_PRESENT
TestTypes();
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
- TestNodeHandling ();
- TestMerge();
+ node_handling::TestNodeHandling<MyMap>();
+ node_handling::TestNodeHandling<MyMultiMap>();
+ node_handling::TestMerge<MyMap, MyMultiMap>(10000);
+ node_handling::TestMerge<MyMap, MyDegenerateMap>(10000);
#endif /*__TBB_UNORDERED_NODE_HANDLE_PRESENT*/
return Harness::Done;
typedef FooIterator init_iterator_type;
};
#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
-
-template <bool defCtorPresent, typename value_type>
-void TestTypesSet( const std::list<value_type> &lst ) {
- TypeTester< defCtorPresent, tbb::concurrent_unordered_set<value_type, tbb::tbb_hash<value_type>, Harness::IsEqual>,
- tbb::concurrent_unordered_set< value_type, tbb::tbb_hash<value_type>, Harness::IsEqual, debug_allocator<value_type> > >( lst );
- TypeTester< defCtorPresent, tbb::concurrent_unordered_multiset<value_type, tbb::tbb_hash<value_type>, Harness::IsEqual>,
- tbb::concurrent_unordered_multiset< value_type, tbb::tbb_hash<value_type>, Harness::IsEqual, debug_allocator<value_type> > >( lst );
-}
+struct UnorderedSetTypesTester {
+ template <bool defCtorPresent, typename value_type>
+ void check( const std::list<value_type> &lst ) {
+ TypeTester< defCtorPresent, tbb::concurrent_unordered_set<value_type, tbb::tbb_hash<value_type>, Harness::IsEqual>,
+ tbb::concurrent_unordered_set< value_type, tbb::tbb_hash<value_type>, Harness::IsEqual, debug_allocator<value_type> > >( lst );
+ TypeTester< defCtorPresent, tbb::concurrent_unordered_multiset<value_type, tbb::tbb_hash<value_type>, Harness::IsEqual>,
+ tbb::concurrent_unordered_multiset< value_type, tbb::tbb_hash<value_type>, Harness::IsEqual, debug_allocator<value_type> > >( lst );
+ }
+};
void TestTypes( ) {
- const int NUMBER = 10;
-
- std::list<int> arrInt;
- for ( int i = 0; i<NUMBER; ++i ) arrInt.push_back( i );
- TestTypesSet</*defCtorPresent = */true>( arrInt );
-
- std::list< tbb::atomic<int> > arrTbb(NUMBER);
- int seq = 0;
- for ( std::list< tbb::atomic<int> >::iterator it = arrTbb.begin(); it != arrTbb.end(); ++it, ++seq ) *it = seq;
- TestTypesSet</*defCtorPresent = */true>( arrTbb );
-
-#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN
- std::list< std::reference_wrapper<int> > arrRef;
- for ( std::list<int>::iterator it = arrInt.begin( ); it != arrInt.end( ); ++it )
- arrRef.push_back( std::reference_wrapper<int>(*it) );
- TestTypesSet</*defCtorPresent = */false>( arrRef );
-#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN */
-
-#if __TBB_CPP11_SMART_POINTERS_PRESENT
- std::list< std::shared_ptr<int> > arrShr;
- for ( int i = 0; i<NUMBER; ++i ) arrShr.push_back( std::make_shared<int>( i ) );
- TestTypesSet</*defCtorPresent = */true>( arrShr );
-
- std::list< std::weak_ptr<int> > arrWk;
- std::copy( arrShr.begin( ), arrShr.end( ), std::back_inserter( arrWk ) );
- TestTypesSet</*defCtorPresent = */true>( arrWk );
-
-#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ TestSetCommonTypes<UnorderedSetTypesTester>();
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT
// Regression test for a problem with excessive requirements of emplace()
test_emplace_insert<tbb::concurrent_unordered_set< test::unique_ptr<int> >,
tbb::internal::false_type>( new int, new int );
test_emplace_insert<tbb::concurrent_unordered_multiset< test::unique_ptr<int> >,
tbb::internal::false_type>( new int, new int );
-#endif
-
-#else
- REPORT( "Known issue: C++11 smart pointer tests are skipped.\n" );
-#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
-}
-
-#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
-
-void TestNodeHandling() {
- tbb::concurrent_unordered_set<int> unordered_set;
- for (int i =1; i< 5; i++)
- unordered_set.insert(i);
- node_handling::NodeHandlingTests(unordered_set, /*new key for test_data*/5);
-
- tbb::concurrent_unordered_multiset<int> unordered_multiset;
- for (int i =1; i< 5; i++)
- unordered_multiset.insert(i);
- unordered_multiset.insert(1);
- unordered_multiset.insert(2);
- node_handling::NodeHandlingTests(unordered_multiset, /*new key for test_data*/5);
-}
-
-void TestMerge(){
- using Set = tbb::concurrent_unordered_set<int>;
- using MultiSet = tbb::concurrent_unordered_multiset<int>;
-
- Set set_for_merge1;
- set_for_merge1.insert(1);
- set_for_merge1.insert(2);
- set_for_merge1.insert(3);
- Set set_for_merge2;
- set_for_merge2.insert(1);
- set_for_merge2.insert(2);
- set_for_merge2.insert(9);
-
- MultiSet multiset_for_merge1;
- multiset_for_merge1.insert(1);
- multiset_for_merge1.insert(1);
- multiset_for_merge1.insert(2);
- multiset_for_merge1.insert(3);
- multiset_for_merge1.insert(4);
- MultiSet multiset_for_merge2;
- multiset_for_merge2.insert(1);
- multiset_for_merge2.insert(2);
- multiset_for_merge2.insert(5);
- multiset_for_merge2.insert(5);
- multiset_for_merge2.insert(6);
-
- node_handling::TestMergeTransposition(set_for_merge1, set_for_merge2,
- multiset_for_merge1, multiset_for_merge2);
-
- // Test merge with different hashers
- tbb::concurrent_unordered_set<int, degenerate_hash<int>> degenerate_hash_set;
- degenerate_hash_set.insert(1);
- degenerate_hash_set.insert(2);
- degenerate_hash_set.insert(9);
-
- tbb::concurrent_unordered_multiset<int, degenerate_hash<int>> degenerate_hash_multiset;
- degenerate_hash_multiset.insert(1);
- degenerate_hash_multiset.insert(2);
- degenerate_hash_multiset.insert(5);
- degenerate_hash_multiset.insert(5);
- degenerate_hash_multiset.insert(6);
-
- node_handling::TestMergeOverloads(set_for_merge1, degenerate_hash_set);
- node_handling::TestMergeOverloads(multiset_for_merge1, degenerate_hash_multiset);
-
- int size = 100000;
-
- Set set_for_merge3(size);
- for (int i = 0; i<size; i++){
- set_for_merge3.insert(i);
- }
- node_handling::TestConcurrentMerge(set_for_merge3);
-
- MultiSet multiset_for_merge3(size/2);
- for (int i = 0; i<size/2; i++){
- multiset_for_merge3.insert(i);
- multiset_for_merge3.insert(i);
- }
- node_handling::TestConcurrentMerge(multiset_for_merge3);
+#endif /*__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_SMART_POINTERS_PRESENT*/
}
-#endif/*__TBB_UNORDERED_NODE_HANDLE_PRESENT*/
-
#endif // __TBB_TEST_SECONDARY
#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
tbb::concurrent_unordered_multiset<int> >( {1,2,3,4,5} );
#endif
+#if __TBB_RANGE_BASED_FOR_PRESENT
+ TestRangeBasedFor<MySet>();
+ TestRangeBasedFor<MyMultiSet>();
+#endif
+
#if __TBB_CPP11_RVALUE_REF_PRESENT
test_rvalue_ref_support<cu_set_type>( "concurrent unordered set" );
test_rvalue_ref_support<cu_multiset_type>( "concurrent unordered multiset" );
#endif
#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
- TestNodeHandling();
- TestMerge();
+ node_handling::TestNodeHandling<MySet>();
+ node_handling::TestNodeHandling<MyMultiSet>();
+ node_handling::TestMerge<MySet, MyMultiSet>(10000);
+ node_handling::TestMerge<MySet, MyDegenerateSet>(10000);
#endif /*__TBB_UNORDERED_NODE_HANDLE_PRESENT*/
return Harness::Done;
SpinBarrier barrier(cpu_threads + /*async thread=*/1);
g_task_num = 0;
g_async_task_ids.clear();
- g_async_task_ids.reserve( async_subgraph_reruns );
+ g_async_task_ids.reserve(async_subgraph_reruns);
- tbb::task_scheduler_init init( cpu_threads );
+ tbb::task_scheduler_init init(cpu_threads);
AsyncActivity activity(barrier);
graph g;
}
for( int p = MinThread; p <= MaxThread; ++p ) {
PriorityNodesTakePrecedence::test( p );
+ ThreadsEagerReaction::test( p );
LimitingExecutionToPriorityTask::test( p );
}
NestedCase::test( MaxThread );
ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count");
ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count");
ASSERT(ln.decrement.my_current_count == 0, "error in current count");
+#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors");
+#endif
ASSERT(ln.my_threshold == 1, "error in my_threshold");
tbb::flow::queue_node<int> inq(g);
tbb::flow::queue_node<int> outq(g);
ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count");
ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count");
ASSERT(ln.decrement.my_current_count == 0, "error in current count");
+#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors");
+#endif
ASSERT(ln.my_threshold == 1, "error in my_threshold");
ASSERT(ln.my_predecessors.empty(), "preds not reset(rf_clear_edges)");
ASSERT(ln.my_successors.empty(), "preds not reset(rf_clear_edges)");
std::vector<T> vec;
public:
ad_hoc_container(){}
+ typename std::vector<T>::const_iterator begin() const {return vec.begin();}
+ typename std::vector<T>::const_iterator end() const {return vec.end();}
+ typename std::vector<T>::size_type size() const {return vec.size();}
template<typename InputIterator>
ad_hoc_container(InputIterator begin, InputIterator end) : vec(begin,end) {}
ad_hoc_container(std::initializer_list<T> il) : vec(il.begin(),il.end()) {}
#include <iostream>
#include <algorithm>
#include <numeric>
+#include <type_traits>
#include "harness.h"
// TODO: Add simple check: comparison with sort_fun().
}
test_explicit_move(b, b+1);
+ auto iter_base = b.base();
+ static_assert(std::is_same<decltype(iter_base),
+ std::tuple<decltype(in1.begin()), decltype(in2.begin())>>::value, "base returned wrong type");
+ ASSERT(std::get<0>(iter_base) == in1.begin(), "wrong result from base (get<0>)");
+ ASSERT(std::get<1>(iter_base) == in2.begin(), "wrong result from base (get<1>)");
+
test_random_iterator(b);
}
};
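
The static_assert above pins down what zip_iterator::base() returns: the tuple of underlying iterators. A standalone sketch of the accessor (the vectors and names are illustrative, not the test's fixtures):

#include "tbb/iterators.h"
#include <cassert>
#include <tuple>
#include <vector>

int main() {
    std::vector<int>   keys   = {1, 2, 3};
    std::vector<float> values = {1.f, 2.f, 3.f};

    auto zip = tbb::make_zip_iterator(keys.begin(), values.begin());
    // base() recovers the tuple of underlying iterators, e.g. after an
    // algorithm has advanced the zip_iterator.
    auto bases = (zip + 2).base();
    assert(std::get<0>(bases) == keys.begin() + 2);
    assert(std::get<1>(bases) == values.begin() + 2);
    return 0;
}
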
ASSERT(lim.decrement.predecessor_count() == 1, NULL);
ASSERT(lim.successor_count() == 1, NULL);
ASSERT(lim.predecessor_count() == 0, NULL);
- typename tbb::flow::interface10::internal::decrementer<tbb::flow::limiter_node<T> >::predecessor_list_type dec_preds;
+ typename tbb::flow::interface10::internal::decrementer
+ <tbb::flow::limiter_node<T>, tbb::flow::continue_msg>::predecessor_list_type dec_preds;
lim.decrement.copy_predecessors(dec_preds);
ASSERT(dec_preds.size() == 1, NULL);
#endif
ASSERT(qn.try_get(outint) && outint == 42, "initial put to decrement stops node");
}
+#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
+using namespace tbb::flow;
+void run_and_check_result(graph& g, limiter_node<int>& limit, queue_node<int>& queue, broadcast_node<continue_msg>& broad) {
+ ASSERT( limit.try_put(1), NULL );
+ ASSERT( limit.try_put(2), NULL );
+ ASSERT( !limit.try_put(3), NULL );
+ ASSERT( broad.try_put(continue_msg()), NULL );
+ ASSERT( limit.decrement.try_put(continue_msg()), NULL );
+ ASSERT( limit.try_put(4), NULL );
+ ASSERT( !limit.try_put(5), NULL );
+ g.wait_for_all();
+
+ int list[] = {1, 2, 4};
+ int var = 0;
+ for (size_t i = 0; i < sizeof(list)/sizeof(list[0]); i++) {
+ queue.try_get(var);
+ ASSERT(var==list[i], "some data dropped, input does not match output");
+ }
+}
+void test_num_decrement_predecessors() {
+ graph g;
+ queue_node<int> output_queue(g);
+    limiter_node<int> limit1(g, 2, /*number_of_decrement_predecessors*/1);
+    limiter_node<int, continue_msg> limit2(g, 2, /*number_of_decrement_predecessors*/1);
+ broadcast_node<continue_msg> broadcast(g);
+
+ make_edge(limit1, output_queue);
+ make_edge(limit2, output_queue);
+
+ make_edge(broadcast, limit1.decrement);
+ make_edge(broadcast, limit2.decrement);
+
+ run_and_check_result(g, limit1, output_queue, broadcast);
+ run_and_check_result(g, limit2, output_queue, broadcast);
+}
+#else // TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
//
// This test ascertains that if a message is not successfully put
// to a successor, the message is not dropped but released.
//
-using namespace tbb::flow;
void test_reserve_release_messages() {
+ using namespace tbb::flow;
graph g;
    //making two queue_nodes, one broadcast_node and one limiter_node
queue_node<int> input_queue(g);
queue_node<int> output_queue(g);
- broadcast_node<continue_msg> broad(g);
- limiter_node<int> limit(g,2,1); //threshold of 2
+ broadcast_node<int> broad(g);
+ limiter_node<int, int> limit(g,2); //threshold of 2
//edges
make_edge(input_queue, limit);
remove_edge(limit, output_queue); //remove successor
- //sending continue messages to the decrement port of the limiter
- broad.try_put(continue_msg());
- broad.try_put(continue_msg()); //failed message retrieved.
+ //sending message to the decrement port of the limiter
+ broad.try_put(1); //failed message retrieved.
g.wait_for_all();
make_edge(limit, output_queue); //putting the successor back
- broad.try_put(continue_msg());
- broad.try_put(continue_msg()); //drop the count
+ broad.try_put(1); //drop the count
input_queue.try_put(list[3]); //success
g.wait_for_all();
int var=0;
- for (int i=0; i<4; i++){
- output_queue.try_get(var);
- ASSERT(var==list[i], "some data dropped, input does not match output");
+ for (int i=0; i<4; i++) {
+ output_queue.try_get(var);
+ ASSERT(var==list[i], "some data dropped, input does not match output");
+ g.wait_for_all();
+ }
+}
+
+void test_decrementer() {
+ const int threshold = 5;
+ tbb::flow::graph g;
+ tbb::flow::limiter_node<int, int> limit(g, threshold);
+ tbb::flow::queue_node<int> queue(g);
+ make_edge(limit, queue);
+ int m = 0;
+ ASSERT( limit.try_put( m++ ), "Newly constructed limiter node does not accept message." );
+ ASSERT( limit.decrement.try_put( -threshold ), // close limiter's gate
+ "Limiter node decrementer's port does not accept message." );
+    ASSERT( !limit.try_put( m++ ), "Closed limiter node accepts a message." );
+ ASSERT( limit.decrement.try_put( threshold + 5 ), // open limiter's gate
+ "Limiter node decrementer's port does not accept message." );
+ for( int i = 0; i < threshold; ++i )
+ ASSERT( limit.try_put( m++ ), "Limiter node does not accept message while open." );
+ ASSERT( !limit.try_put( m ), "Limiter node's gate is not closed." );
+ g.wait_for_all();
+ int expected[] = {0, 2, 3, 4, 5, 6};
+ int actual = -1; m = 0;
+ while( queue.try_get(actual) )
+ ASSERT( actual == expected[m++], NULL );
+ ASSERT( sizeof(expected) / sizeof(expected[0]) == m, "Not all messages have been processed." );
+ g.wait_for_all();
+
+ const size_t threshold2 = size_t(-1);
+ tbb::flow::limiter_node<int, long long> limit2(g, threshold2);
+ make_edge(limit2, queue);
+ ASSERT( limit2.try_put( 1 ), "Newly constructed limiter node does not accept message." );
+ long long decrement_value = (long long)( size_t(-1)/2 );
+ ASSERT( limit2.decrement.try_put( -decrement_value ),
+ "Limiter node decrementer's port does not accept message" );
+ ASSERT( limit2.try_put( 2 ), "Limiter's gate should not be closed yet." );
+ ASSERT( limit2.decrement.try_put( -decrement_value ),
+ "Limiter node decrementer's port does not accept message" );
+ ASSERT( !limit2.try_put( 3 ), "Overflow happened for internal counter." );
+ int expected2[] = {1, 2};
+ actual = -1; m = 0;
+ while( queue.try_get(actual) )
+ ASSERT( actual == expected2[m++], NULL );
+ ASSERT( sizeof(expected2) / sizeof(expected2[0]) == m, "Not all messages have been processed." );
g.wait_for_all();
- }
}
+#endif // TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
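
test_decrementer above relies on the extended decrement port, which accepts integral messages: a positive value releases that many counts, while a negative value adds to the count and so closes the gate. A minimal sketch of the same behavior in isolation, with the second template argument selecting the decrement message type as in the tests:

#include "tbb/flow_graph.h"
#include <cassert>

int main() {
    tbb::flow::graph g;
    // Threshold of 2; the second template argument makes the decrement port carry int.
    tbb::flow::limiter_node<int, int> limit(g, 2);
    tbb::flow::queue_node<int> out(g);
    tbb::flow::make_edge(limit, out);

    assert(limit.try_put(10));
    assert(limit.try_put(20));
    assert(!limit.try_put(30));   // the gate closes at the threshold
    limit.decrement.try_put(2);   // a positive value releases two counts at once
    assert(limit.try_put(40));    // the gate is open again
    g.wait_for_all();
    return 0;
}
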
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
void test_extract() {
test_multifunction_to_limiter(30,3);
test_multifunction_to_limiter(300,13);
test_multifunction_to_limiter(3000,1);
+#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR
+ test_num_decrement_predecessors();
+#else
test_reserve_release_messages();
+ test_decrementer();
+#endif
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
test_extract();
#endif
// push to maximal cache limit
for (int i=0; i<2; i++) {
const int sizes[] = { MByte/sizeof(int),
- (MByte-2*LargeObjectCache::largeBlockCacheStep)/sizeof(int) };
+ (MByte-2*LargeObjectCache::LargeBSProps::CacheStep)/sizeof(int) };
for (int q=0; q<2; q++) {
size_t curr = 0;
for (int j=0; j<LARGE_MEM_SIZES_NUM; j++, curr++)
StressLOCacheWork(rml::MemoryPool *mallocPool) : my_mallocPool(mallocPool) {}
void operator()(int) const {
for (size_t sz=minLargeObjectSize; sz<1*1024*1024;
- sz+=LargeObjectCache::largeBlockCacheStep) {
+ sz+=LargeObjectCache::LargeBSProps::CacheStep) {
void *ptr = pool_malloc(my_mallocPool, sz);
ASSERT(ptr, "Memory was not allocated");
memset(ptr, sz, sz);
void *p[5];
pool_create_v1(0, &pol, &mallocPool);
const LargeObjectCache *loc = &((rml::internal::MemoryPool*)mallocPool)->extMemPool.loc;
- p[3] = pool_malloc(mallocPool, minLargeObjectSize+2*LargeObjectCache::largeBlockCacheStep);
+ const int LargeCacheStep = LargeObjectCache::LargeBSProps::CacheStep;
+ p[3] = pool_malloc(mallocPool, minLargeObjectSize+2*LargeCacheStep);
for (int i=0; i<10; i++) {
p[0] = pool_malloc(mallocPool, minLargeObjectSize);
- p[1] = pool_malloc(mallocPool, minLargeObjectSize+LargeObjectCache::largeBlockCacheStep);
+ p[1] = pool_malloc(mallocPool, minLargeObjectSize+LargeCacheStep);
pool_free(mallocPool, p[0]);
pool_free(mallocPool, p[1]);
}
ASSERT(loc->getUsedSize(), NULL);
pool_free(mallocPool, p[3]);
- ASSERT(loc->getLOCSize() < 3*(minLargeObjectSize+LargeObjectCache::largeBlockCacheStep), NULL);
+ ASSERT(loc->getLOCSize() < 3*(minLargeObjectSize+LargeCacheStep), NULL);
const size_t maxLocalLOCSize = LocalLOCImpl<3,30>::getMaxSize();
ASSERT(loc->getUsedSize() <= maxLocalLOCSize, NULL);
for (int i=0; i<3; i++)
- p[i] = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep);
+ p[i] = pool_malloc(mallocPool, minLargeObjectSize+i*LargeCacheStep);
size_t currUser = loc->getUsedSize();
- ASSERT(!loc->getLOCSize() && currUser >= 3*(minLargeObjectSize+LargeObjectCache::largeBlockCacheStep), NULL);
- p[4] = pool_malloc(mallocPool, minLargeObjectSize+3*LargeObjectCache::largeBlockCacheStep);
- ASSERT(loc->getUsedSize() - currUser >= minLargeObjectSize+3*LargeObjectCache::largeBlockCacheStep, NULL);
+ ASSERT(!loc->getLOCSize() && currUser >= 3*(minLargeObjectSize+LargeCacheStep), NULL);
+ p[4] = pool_malloc(mallocPool, minLargeObjectSize+3*LargeCacheStep);
+ ASSERT(loc->getUsedSize() - currUser >= minLargeObjectSize+3*LargeCacheStep, NULL);
pool_free(mallocPool, p[4]);
ASSERT(loc->getUsedSize() <= currUser+maxLocalLOCSize, NULL);
pool_reset(mallocPool);
pool_create_v1(0, &pol, &mallocPool);
rml::internal::ExtMemoryPool *mPool = &((rml::internal::MemoryPool*)mallocPool)->extMemPool;
const LargeObjectCache *loc = &((rml::internal::MemoryPool*)mallocPool)->extMemPool.loc;
+ const int LargeCacheStep = LargeObjectCache::LargeBSProps::CacheStep;
for (int i=0; i<22; i++) {
- void *o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep);
+ void *o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeCacheStep);
bool ret = lLOC.put(((LargeObjectHdr*)o - 1)->memoryBlock, mPool);
ASSERT(ret, NULL);
- o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep);
+ o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeCacheStep);
ret = lLOC.put(((LargeObjectHdr*)o - 1)->memoryBlock, mPool);
ASSERT(ret, NULL);
}
void *ptrs[num_allocs];
tbb::atomic<int> alloc_counter;
-void multiThreadAlloc(size_t alloc_size) {
+inline void multiThreadAlloc(size_t alloc_size) {
for( int i = alloc_counter++; i < num_allocs; i = alloc_counter++ ) {
ptrs[i] = scalable_malloc( alloc_size );
ASSERT( ptrs[i] != NULL, "scalable_malloc returned zero." );
}
}
-void crossThreadDealloc() {
+inline void crossThreadDealloc() {
for( int i = --alloc_counter; i >= 0; i = --alloc_counter ) {
if (i < num_allocs) scalable_free( ptrs[i] );
}
#endif /*!__TBB_WIN8UI_SUPPORT && defined(_WIN32)*/
+#include <cmath> // pow function
+
+// Huge objects cache: the formula Size = MinSize * 2^(Index / StepFactor) gives the bin size,
+// but it does not match our sizeToIdx approximation algorithm, where the step sizes between major
+// (power of 2) sizes are equal. Used internally by the test. Static cast to avoid warnings.
+inline size_t hocIdxToSizeFormula(int idx) {
+ return static_cast<size_t>(float(rml::internal::LargeObjectCache::maxLargeSize) *
+ pow(2, float(idx) / float(rml::internal::LargeObjectCache::HugeBSProps::StepFactor)));
+}
+// Large objects cache arithmetic progression
+inline size_t locIdxToSizeFormula(int idx) {
+ return rml::internal::LargeObjectCache::LargeBSProps::MinSize +
+ (idx * rml::internal::LargeObjectCache::LargeBSProps::CacheStep);
+}
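
To make the two progressions concrete, here is a tiny self-contained computation; the constants are illustrative stand-ins, since the real MinSize, CacheStep, maxLargeSize and StepFactor values come from the allocator's bin-size properties and are not reproduced in this hunk:

#include <cmath>
#include <cstddef>
#include <cstdio>

int main() {
    // Hypothetical stand-ins for the LargeBSProps/HugeBSProps constants.
    const size_t kMinSize    = 8 * 1024;
    const size_t kCacheStep  = 8 * 1024;
    const size_t kMaxLarge   = 8 * 1024 * 1024;
    const int    kStepFactor = 4;

    // Large objects cache: arithmetic progression MinSize + idx * CacheStep.
    for (int idx = 0; idx < 3; ++idx)
        std::printf("large bin %d -> %zu bytes\n", idx, kMinSize + idx * kCacheStep);

    // Huge objects cache: geometric progression maxLargeSize * 2^(idx / StepFactor).
    for (int idx = 0; idx < 3; ++idx)
        std::printf("huge bin %d -> %zu bytes\n", idx,
                    (size_t)(double(kMaxLarge) * std::pow(2.0, double(idx) / kStepFactor)));
    return 0;
}
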
+
+template <typename CacheType>
+void TestLOCacheBinsConverterImpl(int idx, size_t checkingSize) {
+ size_t alignedSize = CacheType::alignToBin(checkingSize);
+ MALLOC_ASSERT(alignedSize >= checkingSize, "Size is not correctly aligned");
+ int calcIdx = CacheType::sizeToIdx(alignedSize);
+    MALLOC_ASSERT(calcIdx == idx, "Index was calculated incorrectly from the size");
+}
+
+void TestLOCacheBinsConverter(){
+ typedef rml::internal::LargeObjectCache::LargeCacheType LargeCacheType;
+ typedef rml::internal::LargeObjectCache::HugeCacheType HugeCacheType;
+
+ size_t checkingSize = 0;
+ for (int idx = 0; idx < LargeCacheType::numBins; idx++) {
+ checkingSize = locIdxToSizeFormula(idx);
+ TestLOCacheBinsConverterImpl<LargeCacheType>(idx, checkingSize);
+ }
+ for (int idx = 0; idx < HugeCacheType::numBins; idx++) {
+ checkingSize = hocIdxToSizeFormula(idx);
+ TestLOCacheBinsConverterImpl<HugeCacheType>(idx, checkingSize);
+ }
+}
+
+struct HOThresholdTester {
+ LargeObjectCache* loc;
+ size_t hugeSize;
+
+ static const size_t sieveSize = LargeObjectCache::defaultMaxHugeSize;
+    // The sieve starts at 64MB (the 24th cache bin); a 4-bin radius around it is enough to check
+    // while keeping memory consumption decent (especially on 32-bit architectures)
+ static const int MIN_BIN_IDX = 20;
+ static const int MAX_BIN_IDX = 28;
+
+ enum CleanupType {
+ NO_CLEANUP,
+ REGULAR_CLEANUP,
+ HARD_CLEANUP
+ };
+
+ void populateCache() {
+ LargeMemoryBlock* loArray[MAX_BIN_IDX - MIN_BIN_IDX];
+        // To avoid side effects of backend::softCacheCleanup (cleanup by isLOCToolarge),
+        // first allocate all objects and then cache them at once.
+        // Moreover, since the first cached item is still dropped from the cache due to the lack of history,
+        // redo the allocation a second time.
+ for (int idx = MIN_BIN_IDX; idx < MAX_BIN_IDX; ++idx) {
+ size_t allocationSize = alignedSizeFromIdx(idx);
+ int localIdx = idx - MIN_BIN_IDX;
+ loArray[localIdx] = defaultMemPool->extMemPool.mallocLargeObject(defaultMemPool, allocationSize);
+ MALLOC_ASSERT(loArray[localIdx], "Large object was not allocated.");
+ loc->put(loArray[localIdx]);
+ loArray[localIdx] = defaultMemPool->extMemPool.mallocLargeObject(defaultMemPool, allocationSize);
+ }
+ for (int idx = MIN_BIN_IDX; idx < MAX_BIN_IDX; ++idx) {
+ loc->put(loArray[idx - MIN_BIN_IDX]);
+ }
+ }
+ void clean(bool all) {
+ if (all) {
+ // Should avoid any threshold and clean all bins
+ loc->cleanAll();
+ } else {
+            // Regular cleanup should do nothing for bins above the threshold. The decreasing cleanup is
+            // used in the test to make sure that all objects below defaultMaxHugeSize (sieveSize) are cleaned
+ loc->regularCleanup();
+ loc->decreasingCleanup();
+ }
+ }
+ void check(CleanupType type) {
+ for (int idx = MIN_BIN_IDX; idx < MAX_BIN_IDX; ++idx) {
+ size_t objectSize = alignedSizeFromIdx(idx);
+            // Objects below the sieve threshold and above the huge object threshold should be cached
+            // (others should be sieved), unless the whole cache is dropped. Regular cleanup drops objects only below the sieve size.
+ if (type == NO_CLEANUP && sizeInCacheRange(objectSize)) {
+                MALLOC_ASSERT(objectInCacheBin(idx, objectSize), "Object was released from the cache, but it shouldn't have been.");
+ } else if (type == REGULAR_CLEANUP && (objectSize >= hugeSize)) {
+                MALLOC_ASSERT(objectInCacheBin(idx, objectSize), "Object was released from the cache, but it shouldn't have been.");
+ } else { // HARD_CLEANUP
+ MALLOC_ASSERT(cacheBinEmpty(idx), "Object is still cached.");
+ }
+ }
+ }
+
+private:
+ bool cacheBinEmpty(int idx) {
+ return (loc->hugeCache.bin[idx].cachedSize == 0 && loc->hugeCache.bin[idx].get() == NULL);
+ }
+ bool objectInCacheBin(int idx, size_t size) {
+ return (loc->hugeCache.bin[idx].cachedSize != 0 && loc->hugeCache.bin[idx].cachedSize % size == 0);
+ }
+ bool sizeInCacheRange(size_t size) {
+ return size <= sieveSize || size >= hugeSize;
+ }
+ size_t alignedSizeFromIdx(int idx) {
+ return rml::internal::LargeObjectCache::alignToBin(hocIdxToSizeFormula(idx));
+ }
+};
+
+// The TBBMALLOC_SET_HUGE_SIZE_THRESHOLD value should be set before the test,
+// through the scalable allocation API or the environment variable
+void TestHugeSizeThresholdImpl(LargeObjectCache* loc, size_t hugeSize, bool fullTesting) {
+ HOThresholdTester test = {loc, hugeSize};
+ test.populateCache();
+ // Check the default sieve value
+ test.check(HOThresholdTester::NO_CLEANUP);
+
+ if(fullTesting) {
+ // Check that objects above threshold stay in cache after regular cleanup
+ test.clean(/*all*/false);
+ test.check(HOThresholdTester::REGULAR_CLEANUP);
+ }
+    // Check that all objects are dropped from the cache after a hard cleanup (which ignores the huge objects threshold)
+ test.clean(/*all*/true);
+ test.check(HOThresholdTester::HARD_CLEANUP);
+ // Restore previous settings
+ loc->setHugeSizeThreshold(LargeObjectCache::maxHugeSize);
+ loc->reset();
+}
+
+/*
+ * Test for the default huge size and for the behaviour when huge object settings are defined
+ */
+void TestHugeSizeThreshold() {
+ // Clean up if something was allocated before the test and reset cache state
+ scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, 0);
+ LargeObjectCache* loc = &defaultMemPool->extMemPool.loc;
+ // Restore default settings just in case
+ loc->setHugeSizeThreshold(LargeObjectCache::maxHugeSize);
+ loc->reset();
+    // First, check the default huge size value (with the max huge object threshold).
+    // Everything larger than this value should be released to the OS without caching.
+    TestHugeSizeThresholdImpl(loc, loc->hugeSizeThreshold, false);
+    // Then set the huge object threshold.
+    // All objects with sizes above the threshold will be released only after a hard cleanup.
+#if !__TBB_WIN8UI_SUPPORT
+ // Unit testing for environment variable
+ Harness::SetEnv("TBB_MALLOC_SET_HUGE_SIZE_THRESHOLD","67108864");
+    // The large object cache reads the threshold environment variable during initialization,
+    // so reset the value before the test.
+ loc->hugeSizeThreshold = 0;
+ loc->init(&defaultMemPool->extMemPool);
+ TestHugeSizeThresholdImpl(loc, 64 * MByte, true);
+#endif
+ // Unit testing for scalable_allocation_command
+ scalable_allocation_mode(TBBMALLOC_SET_HUGE_SIZE_THRESHOLD, 56 * MByte);
+ TestHugeSizeThresholdImpl(loc, 56 * MByte, true);
+}
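
From the application side, the threshold exercised above is controlled with a single scalable_allocation_mode call (or with the TBB_MALLOC_SET_HUGE_SIZE_THRESHOLD environment variable, as the branch above shows). A minimal sketch:

#include "tbb/scalable_allocator.h"

int main() {
    // Blocks of at least 64MB are kept in the huge objects cache on free
    // instead of being returned to the OS right away.
    scalable_allocation_mode(TBBMALLOC_SET_HUGE_SIZE_THRESHOLD, 64 * 1024 * 1024);

    void* p = scalable_malloc(64 * 1024 * 1024);
    scalable_free(p);  // the block may now stay cached for reuse

    // An explicit cleanup releases cached memory regardless of the threshold.
    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, 0);
    return 0;
}
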
+
int TestMain () {
scalable_allocation_mode(USE_HUGE_PAGES, 0);
#if !__TBB_WIN8UI_SUPPORT
TestLOC();
TestSlabAlignment();
TestReallocDecreasing();
+ TestLOCacheBinsConverter();
+ TestHugeSizeThreshold();
#if __linux__
if (isTHPEnabledOnMachine()) {
}
#endif
-
#if !__TBB_WIN8UI_SUPPORT && defined(_WIN32)
TesFunctionReplacementLog();
#endif
-
return Harness::Done;
}
    return (sequence_length + 1) * sequence_length / 2;
}
+    struct unified_summer {
+        template <typename type>
+        type operator()(type const& lhs, type const& rhs) const
+        {
+            return lhs + rhs;
+        }
+
+        template<typename first_type, typename second_type>
+        second_type operator()(second_type const& lhs, std::pair<first_type, second_type> const& rhs) const
+        {
+            return lhs + rhs.second;
+        }
+    };
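
unified_summer lets a single functor fold either a plain sequence or the mapped values of a pair sequence; the second overload is selected only when the right-hand operand is a std::pair. A sketch of that dispatch with std::accumulate (the data and the _sketch name are illustrative):

#include <cassert>
#include <numeric>
#include <utility>
#include <vector>

// Mirrors unified_summer above: one functor for both value and pair folds.
struct unified_summer_sketch {
    template <typename T>
    T operator()(T const& lhs, T const& rhs) const { return lhs + rhs; }

    template <typename K, typename V>
    V operator()(V const& lhs, std::pair<K, V> const& rhs) const { return lhs + rhs.second; }
};

int main() {
    std::vector<int> plain = {1, 2, 3};
    std::vector<std::pair<int, int> > pairs = {{7, 1}, {8, 2}, {9, 3}};

    assert(std::accumulate(plain.begin(), plain.end(), 0, unified_summer_sketch()) == 6);
    assert(std::accumulate(pairs.begin(), pairs.end(), 0, unified_summer_sketch()) == 6);
    return 0;
}
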
+
struct pair_second_summer{
template<typename first_type, typename second_type>
second_type operator() (second_type const& lhs, std::pair<first_type, second_type> const& rhs) const
limitations under the License.
*/
-#if !(_WIN32||_WIN64) || (__MINGW64__||__MINGW32__)
+#include "tbb/tbb_config.h"
+
+#if !(_WIN32||_WIN64) || (__MINGW64__||__MINGW32__) || __TBB_WIN8UI_SUPPORT
#include "harness.h"
#define TBB_PREVIEW_FLOW_GRAPH_NODES 1
#define TBB_PREVIEW_BLOCKED_RANGE_ND 1
#define TBB_PREVIEW_WAITING_FOR_WORKERS 1
+#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
#endif
#if __TBB_TEST_SECONDARY
#if !__TBB_TEST_SECONDARY
TestExceptionClassExports( std::runtime_error("test"), tbb::internal::eid_blocking_thread_join_impossible );
#endif
+#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT
+ TestTypeDefinitionPresence2(concurrent_map<int, int> );
+ TestTypeDefinitionPresence2(concurrent_multimap<int, int> );
+ TestTypeDefinitionPresence(concurrent_set<int> );
+ TestTypeDefinitionPresence(concurrent_multiset<int> );
+#endif
}
#endif
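
Beyond the type-presence checks above, a minimal usage sketch of the new preview containers; the per-container header name is an assumption, inferred from the container name:

#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
#include "tbb/concurrent_map.h"
#include <cassert>

int main() {
    // Ordered concurrent container (requires C++11): concurrent insertion,
    // iteration yields keys in ascending order.
    tbb::concurrent_map<int, int> squares;
    for (int i = 1; i <= 4; ++i)
        squares.insert({i, i * i});

    int prev_key = 0;
    for (const auto& kv : squares) {
        assert(kv.first > prev_key);  // keys come out in order
        prev_key = kv.first;
    }
    assert(squares.count(3) == 1);
    return 0;
}
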
void initialize_strings_vector(std::vector <string_pair>* vector)
{
vector->push_back(string_pair("TBB: VERSION\t\t2019.0", required)); // check TBB_VERSION
- vector->push_back(string_pair("TBB: INTERFACE VERSION\t11006", required)); // check TBB_INTERFACE_VERSION
+ vector->push_back(string_pair("TBB: INTERFACE VERSION\t11007", required)); // check TBB_INTERFACE_VERSION
vector->push_back(string_pair("TBB: BUILD_DATE", required));
vector->push_back(string_pair("TBB: BUILD_HOST", required));
vector->push_back(string_pair("TBB: BUILD_OS", required));